commit 63b4e9
diff --git a/clock.c b/clock.c
index 0615187..6237bcf 100644
--- a/clock.c
+++ b/clock.c
@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <time.h>
 #include <linux/net_tstamp.h>
+#include <math.h>
 #include <poll.h>
 #include <stdlib.h>
 #include <string.h>
@@ -48,6 +49,8 @@
 #define N_CLOCK_PFD (N_POLLFD + 1) /* one extra per port, for the fault timer */
 #define POW2_41 ((double)(1ULL << 41))
 
+#define OFFSETS_ARRAY_SIZE 30
+
 struct interface {
 	STAILQ_ENTRY(interface) list;
 };
@@ -114,6 +117,32 @@ struct clock {
 	int utc_offset;
 	int time_flags; /* grand master role */
 	int time_source; /* grand master role */
+
+	int64_t min_offset_locked;
+	int64_t max_freq_change;
+	int64_t max_offset_locked;
+	int max_offset_skipped_count;
+	int offset_skipped_count;
+	int64_t max_offset_locked_init;
+	int64_t offsets_array[OFFSETS_ARRAY_SIZE];
+	double freqs_array[OFFSETS_ARRAY_SIZE];
+	int offset_ptr;
+	int large_offset_ptr;
+	int offset_count;
+	int64_t offset_stdev;
+	int64_t offset_sigma_sq;
+	int offset_stdev_factor;
+	int64_t offset_mean; // this should stay close to 0; we can raise a warning if it grows too large.
+	int freq_ptr;
+	int freq_count;
+	double freq_stdev;
+	double freq_sigma_sq;
+	int freq_stdev_factor;
+	double freq_mean;
+	tmv_t last_correction;
+	int64_t min_offset_stddev;
+	double min_offset_freq_mean;
+
 	UInteger8 clock_class_threshold;
 	UInteger8 max_steps_removed;
 	enum servo_state servo_state;
@@ -145,6 +174,7 @@ static void handle_state_decision_event(struct clock *c);
 static int clock_resize_pollfd(struct clock *c, int new_nports);
 static void clock_remove_port(struct clock *c, struct port *p);
 static void clock_stats_display(struct clock_stats *s);
+static void clock_synchronize_locked(struct clock *c, double adj);
 
 static void remove_subscriber(struct clock_subscriber *s)
 {
@@ -270,6 +300,10 @@ void clock_destroy(struct clock *c)
 {
 	struct port *p, *tmp;
 
+	/* set last known mean freq on destroy */
+	if (c->min_offset_stddev != INT64_MAX)
+		clock_synchronize_locked(c, c->min_offset_freq_mean);
+
 	interface_destroy(c->uds_rw_if);
 	interface_destroy(c->uds_ro_if);
 	clock_flush_subscriptions(c);
@@ -1105,6 +1139,28 @@ struct clock *clock_create(enum clock_type type, struct config *config,
 	c->time_source = config_get_int(config, NULL, "timeSource");
 	c->step_window = config_get_int(config, NULL, "step_window");
 
+	c->min_offset_locked = config_get_int(config, NULL, "min_offset_locked");
+	c->max_freq_change = config_get_int(config, NULL, "max_freq_change");
+	c->max_offset_skipped_count = config_get_int(config, NULL, "max_offset_skipped_count");
+	c->max_offset_locked_init = config_get_int(config, NULL, "max_offset_locked_init");
+	c->offset_stdev_factor = config_get_int(config, NULL, "offset_stdev_factor");
+	c->freq_stdev_factor = config_get_int(config, NULL, "freq_stdev_factor");
+	c->offset_ptr = 0;
+	c->large_offset_ptr = 0;
+	c->offset_count = 0;
+	c->offset_stdev = 0;
+	c->offset_sigma_sq = 0;
+	c->offset_mean = 0;
+	c->freq_ptr = 0;
+	c->freq_count = 0;
+	c->freq_stdev = 0;
+	c->freq_sigma_sq = 0;
+	c->freq_mean = 0;
+	c->offset_skipped_count = 0;
+	c->last_correction = tmv_zero();
+	c->min_offset_freq_mean = 0;
+	c->min_offset_stddev = INT64_MAX;
+
 	if (c->free_running) {
 		c->clkid = CLOCK_INVALID;
 		if (timestamping == TS_SOFTWARE || timestamping == TS_LEGACY_HW) {
@@ -1798,11 +1854,77 @@ static void clock_synchronize_locked(struct clock *c, double adj)
 	}
 }
 
+// reset the spike filter state: means, sigma_sq accumulators, counters and ring buffer pointers
+void init_clock_spike_filter(struct clock *c){
+	c->offset_ptr = 0;
+	c->offset_count = 0;
+	c->offset_stdev = 0;
+	c->offset_sigma_sq = 0;
+	c->offset_mean = 0;
+	c->freq_ptr = 0;
+	c->freq_count = 0;
+	c->freq_stdev = 0;
+	c->freq_sigma_sq = 0;
+	c->freq_mean = 0;
+	c->offset_skipped_count = 0;
+	pr_notice("Reset spike filter variables");
+}
+void update_clock_offset_stats(struct clock *c, int64_t offset)
+{
+	if (c->offset_count < OFFSETS_ARRAY_SIZE){
+		c->offset_mean = (c->offset_mean*c->offset_count+offset)/(c->offset_count+1);
+		c->offset_sigma_sq = c->offset_sigma_sq + pow(offset,2);
+		c->offset_stdev = sqrt(c->offset_sigma_sq/(c->offset_count+1));
+	} else{
+		c->offset_ptr = c->offset_ptr % OFFSETS_ARRAY_SIZE;
+		c->offset_mean = (c->offset_mean * OFFSETS_ARRAY_SIZE - c->offsets_array[c->offset_ptr]+offset)/OFFSETS_ARRAY_SIZE;
+		c->offset_sigma_sq = c->offset_sigma_sq - pow(c->offsets_array[c->offset_ptr],2) + pow(offset,2);
+		c->offset_stdev = sqrt(c->offset_sigma_sq/OFFSETS_ARRAY_SIZE);
+		if (c->offset_stdev > 0 && c->offset_stdev < c->min_offset_stddev)
+			c->min_offset_stddev = c->offset_stdev;
+	}
+	if (c->offset_stdev < 0) {
+		init_clock_spike_filter(c);
+		return;
+	}
+	c->offsets_array[c->offset_ptr] = offset;
+	c->offset_count+=1;
+	c->offset_ptr+=1;
+}
+
+void update_clock_freq_stats(struct clock *c, double freq)
+{
+	if (c->freq_count < OFFSETS_ARRAY_SIZE){
+		c->freq_mean = (c->freq_mean*c->freq_count+freq)/(c->freq_count+1);
+		c->freq_sigma_sq = c->freq_sigma_sq + pow(freq,2);
+		c->freq_stdev = sqrt(c->freq_sigma_sq/(c->freq_count+1) - pow(c->freq_mean,2));
+	} else{
+		c->freq_ptr = c->freq_ptr % OFFSETS_ARRAY_SIZE;
+		c->freq_mean = (c->freq_mean * OFFSETS_ARRAY_SIZE - c->freqs_array[c->freq_ptr]+freq)/OFFSETS_ARRAY_SIZE;
+		c->freq_sigma_sq = c->freq_sigma_sq - pow(c->freqs_array[c->freq_ptr],2) + pow(freq,2);
+		c->freq_stdev = sqrt(c->freq_sigma_sq/OFFSETS_ARRAY_SIZE - pow(c->freq_mean,2));
+		if (c->offset_stdev == c->min_offset_stddev) {
+			c->min_offset_freq_mean = c->freq_mean;
+			pr_notice("Best offset stddev = %ld, new mean freq = %lf", c->min_offset_stddev, c->min_offset_freq_mean);
+		}
+	}
+	c->freqs_array[c->freq_ptr] = freq;
+	c->freq_count+=1;
+	c->freq_ptr+=1;
+	c->last_correction = c->ingress_ts;
+}
+
+int64_t max_func(int64_t num1, int64_t num2)
+{
+	return (num1 > num2) ? num1 : num2;
+}
+
 enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 {
 	enum servo_state state = SERVO_UNLOCKED;
 	double adj, weight;
-	int64_t offset;
+	tmv_t master_offset;
+	int64_t offset, unsync_seconds;
 
 	if (c->step_window_counter) {
 		c->step_window_counter--;
@@ -1816,7 +1938,7 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 
 	tsproc_down_ts(c->tsproc, origin, ingress);
 
-	if (tsproc_update_offset(c->tsproc, &c->master_offset, &weight)) {
+	if (tsproc_update_offset(c->tsproc, &master_offset, &weight)) {
 		if (c->free_running) {
 			return clock_no_adjust(c, ingress, origin);
 		} else {
@@ -1824,6 +1946,60 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 		}
 	}
 
+	offset = tmv_to_nanoseconds(master_offset);
+
+	if (c->servo_state == SERVO_LOCKED) {
+		pr_debug("mean freq: %lf", c->min_offset_freq_mean);
+		if (c->offset_count < OFFSETS_ARRAY_SIZE){
+			c->offset_skipped_count = 0;
+			// update the statistics of the clock
+			update_clock_offset_stats(c, offset);
+		} else {
+			// The last term assumes a frequency error RATE: the frequency error grows by up to max_freq_change every second.
+			// The middle term assumes that, at the time the bad ingress sync packet arrived, we already had a frequency error of (c->freq_stdev_factor * c->freq_stdev).
+			c->max_offset_locked = c->offset_stdev_factor * c->offset_stdev;
+			unsync_seconds = (tmv_to_nanoseconds(tmv_sub(c->ingress_ts, c->last_correction)) / NS_PER_SEC);
+			if (unsync_seconds > 5 || unsync_seconds < 0) {
+				pr_notice("seconds without sync: %ld", unsync_seconds);
+			}
+			c->max_offset_locked += unsync_seconds * c->freq_stdev_factor * ((int64_t) floor(c->freq_stdev)) + (c->max_freq_change/2) * pow(unsync_seconds,2);
+			// Overflow protection: sometimes the window grows so large that ptp4l ends up in a limbo state.
+			if (c->max_offset_locked < 0) {
+				pr_notice("max_offset_locked: %ld, offset_stdev_factor: %d, offset_stdev: %ld", c->max_offset_locked, c->offset_stdev_factor, c->offset_stdev);
+				pr_notice("unsync_seconds: %ld, freq_stdev_factor: %d, freq_stdev: %lf, max_freq_change: %ld", unsync_seconds, c->freq_stdev_factor, c->freq_stdev, c->max_freq_change);
+				c->servo_state = SERVO_UNLOCKED;
+				return c->servo_state;
+			}
+
+			bool is_spike = llabs(offset) > llabs(max_func(c->max_offset_locked, c->min_offset_locked));
+			if (is_spike) {
+				adj = c->min_offset_freq_mean;
+				c->master_offset = nanoseconds_to_tmv(c->max_offset_locked);
+				pr_notice("spike detected => max_offset_locked: %ld, setting offset to min_offset_freq_mean: %lf", c->max_offset_locked, adj);
+				clock_synchronize_locked(c, adj);
+				if (c->offset_skipped_count < c->max_offset_skipped_count) {
+					c->offset_skipped_count++;
+					pr_notice("skip %d/%d large offset (>%ld) %ld", c->offset_skipped_count,
+						  c->max_offset_skipped_count, c->max_offset_locked, offset);
+					// we should consider changing freq to the best mean in case of spike
+					return c->servo_state;
+				} else {
+					// Not totally sure whether we should go to the unlocked state here; it may be better to just keep track of how many samples we missed.
+					c->servo_state = SERVO_UNLOCKED;
+					return c->servo_state;
+				}
+			} else {
+				pr_debug("NO spike detected => max_offset_locked: %ld", c->max_offset_locked);
+				c->offset_skipped_count = 0;
+				// update the statistics of the clock
+				update_clock_offset_stats(c, offset);
+			}
+		}
+	} else {
+		init_clock_spike_filter(c);
+	}
+	c->master_offset = master_offset;
+
 	if (clock_utc_correct(c, ingress)) {
 		return c->servo_state;
 	}
@@ -1836,7 +2012,6 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 		return state;
 	}
 
-	offset = tmv_to_nanoseconds(c->master_offset);
 	if (offset * tmv_sign(c->master_offset) > 10000) {
 		tsproc_dump_state(c->tsproc);
 	}
@@ -1863,6 +2038,7 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 		break;
 	case SERVO_LOCKED:
 		clock_synchronize_locked(c, adj);
+		update_clock_freq_stats(c, adj);
 		break;
 	case SERVO_LOCKED_STABLE:
 		if (c->write_phase_mode) {
@@ -1871,6 +2047,7 @@ enum servo_state clock_synchronize(struct clock *c, tmv_t ingress, tmv_t origin)
 		} else {
 			clock_synchronize_locked(c, adj);
 		}
+		update_clock_freq_stats(c, adj);
 		break;
 	}
 
@@ -2025,6 +2202,10 @@ static void handle_state_decision_event(struct clock *c)
 	if (cid_eq(&best_id, &c->dds.clockIdentity)) {
 		pr_notice("selected local clock %s as best master",
 			  cid2str(&best_id));
+		// set the estimated mean freq while we are free running
+		if (c->min_offset_stddev != INT64_MAX) {
+			clockadj_set_freq(c->clkid, -c->min_offset_freq_mean);
+		}
 	} else {
 		pr_notice("selected best master clock %s",
 			  cid2str(&best_id));
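
Note (not part of the patch): the spike threshold computed in clock_synchronize() can be read as a worst-case offset bound with three parts: offset noise (offset_stdev_factor * offset_stdev), frequency noise accumulated over the unsynchronized interval, and a quadratic term for frequency drift of up to max_freq_change per second. The standalone sketch below only restates that bound; spike_threshold() is a hypothetical helper, the example numbers are made up, and the units are as assumed by this patch (offsets in ns, frequency statistics in ppb, max_freq_change in ppb/s).

#include <inttypes.h>
#include <math.h>
#include <stdio.h>

/* Illustrative re-statement of the bound used in clock_synchronize(). */
static int64_t spike_threshold(int64_t offset_stdev, int offset_stdev_factor,
			       double freq_stdev, int freq_stdev_factor,
			       int64_t max_freq_change, int64_t unsync_seconds)
{
	/* offset noise */
	int64_t bound = offset_stdev_factor * offset_stdev;
	/* frequency noise accumulated while no correction was applied */
	bound += unsync_seconds * freq_stdev_factor * (int64_t)floor(freq_stdev);
	/* frequency drift of up to max_freq_change per second */
	bound += (max_freq_change / 2) * unsync_seconds * unsync_seconds;
	return bound;
}

int main(void)
{
	/* defaults from config.c (factors 3 and 3, max_freq_change 20) with a
	 * 2000 ns offset stdev, 5 ppb freq stdev and a 1 s gap => 6025 ns */
	printf("%" PRId64 " ns\n", spike_threshold(2000, 3, 5.0, 3, 20, 1));
	return 0;
}

An offset is treated as a spike only when it exceeds the larger of this bound and min_offset_locked (default 15000 ns), per max_func(c->max_offset_locked, c->min_offset_locked).
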
diff --git a/clock.h b/clock.h
index 17b2e3b..f86229b 100644
--- a/clock.h
+++ b/clock.h
@@ -361,6 +361,8 @@ struct timePropertiesDS clock_time_properties(struct clock *c);
  */
 void clock_update_time_properties(struct clock *c, struct timePropertiesDS tds);
 
+void init_clock_spike_filter(struct clock *c);
+
 /**
  * Obtain a clock's description.
  * @param c The clock instance.
diff --git a/config.c b/config.c
index 747a735..718d880 100644
--- a/config.c
+++ b/config.c
@@ -346,6 +346,13 @@ struct config_item config_tab[] = {
 	GLOB_ITEM_INT("utc_offset", CURRENT_UTC_OFFSET, 0, INT_MAX),
 	GLOB_ITEM_INT("verbose", 0, 0, 1),
 	GLOB_ITEM_INT("write_phase_mode", 0, 0, 1),
+
+	GLOB_ITEM_INT("max_freq_change", 20, 0, INT_MAX),
+	GLOB_ITEM_INT("max_offset_skipped_count", 15, 0, INT_MAX),
+	GLOB_ITEM_INT("max_offset_locked_init", 500000, 0, INT_MAX),
+	GLOB_ITEM_INT("offset_stdev_factor", 3, 0, INT_MAX),
+	GLOB_ITEM_INT("freq_stdev_factor", 3, 0, INT_MAX),
+	GLOB_ITEM_INT("min_offset_locked", 15000, 0, INT_MAX),
 };
 
 static struct unicast_master_table *current_uc_mtab;
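
Note (not part of the patch): the new config_tab entries make the spike filter tunable from the configuration file. A minimal sketch of a [global] section that simply restates the defaults added above (assuming the usual ptp4l key/value config syntax):

[global]
min_offset_locked         15000
max_freq_change           20
max_offset_skipped_count  15
max_offset_locked_init    500000
offset_stdev_factor       3
freq_stdev_factor         3

Raising offset_stdev_factor or freq_stdev_factor widens the spike window, while min_offset_locked sets the floor below which an offset is never treated as a spike.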