+ *schedule_smooth = 1;
+ }
+ }
+ if (rrd_test_error())
+ return -1;
+
+ if (update_cdp_prep
+ (rrd, elapsed_pdp_st, start_pdp_offset, rra_step_cnt, rra_idx,
+ pdp_temp, *last_seasonal_coef, *seasonal_coef,
+ current_cf) == -1) {
+ return -1;
+ }
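+ /* advance rra_start to the file offset where the next RRA's data begins */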
+ rra_start +=
+ rrd->rra_def[rra_idx].row_cnt * rrd->stat_head->ds_cnt *
+ sizeof(rrd_value_t);
+ }
+ return 0;
+}
+
+/*
+ * Are we due for a smooth? Also increments our position in the burn-in cycle.
+ */
+static int do_schedule_smooth(
+ rrd_t *rrd,
+ unsigned long rra_idx,
+ unsigned long elapsed_pdp_st)
+{
+ unsigned long cdp_idx = rra_idx * (rrd->stat_head->ds_cnt);
+ unsigned long cur_row = rrd->rra_ptr[rra_idx].cur_row;
+ unsigned long row_cnt = rrd->rra_def[rra_idx].row_cnt;
+ unsigned long seasonal_smooth_idx =
+ rrd->rra_def[rra_idx].par[RRA_seasonal_smooth_idx].u_cnt;
+ unsigned long *init_seasonal =
+ &(rrd->cdp_prep[cdp_idx].scratch[CDP_init_seasonal].u_cnt);
+
+ /* Use the first cdp parameter buffer to track burn-in (burn-in requires
+ * a specific smoothing schedule). CDP_init_seasonal is really an
+ * RRA-level parameter, not a per-data-source parameter, but rra_def is
+ * read-only for rrd_update (changes to it are not flushed to disk). */
+ if (*init_seasonal > BURNIN_CYCLES) {
+ /* someone has no doubt invented a trick to deal with this wrap around,
+ * but at least this code is clear. */
+ if (seasonal_smooth_idx > cur_row) {
+ /* here elapsed_pdp_st = rra_step_cnt[rra_idx] because of 1-1 mapping
+ * between PDP and CDP */
+ return (cur_row + elapsed_pdp_st >= seasonal_smooth_idx);
+ }
+ /* can't rely on negative numbers because we are working with unsigned
+ * values; the smooth is due once the update wraps past the end of the
+ * RRA and reaches the smoothing index in the new cycle */
+ return (cur_row + elapsed_pdp_st >= row_cnt
+ && cur_row + elapsed_pdp_st >= row_cnt + seasonal_smooth_idx);
+ }
+ /* mark off one of the burn-in cycles */
+ return (cur_row + elapsed_pdp_st >= row_cnt && ++(*init_seasonal));
+}
+
+/*
+ * For a given RRA, iterate over the data sources and call the appropriate
+ * consolidation function.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int update_cdp_prep(
+ rrd_t *rrd,
+ unsigned long elapsed_pdp_st,
+ unsigned long start_pdp_offset,
+ unsigned long *rra_step_cnt,
+ int rra_idx,
+ rrd_value_t *pdp_temp,
+ rrd_value_t *last_seasonal_coef,
+ rrd_value_t *seasonal_coef,
+ int current_cf)
+{
+ unsigned long ds_idx, cdp_idx;
+
+ /* update CDP_PREP areas */
+ /* loop over data sources within each RRA */
+ for (ds_idx = 0; ds_idx < rrd->stat_head->ds_cnt; ds_idx++) {
+
+ cdp_idx = rra_idx * rrd->stat_head->ds_cnt + ds_idx;
+
+ if (rrd->rra_def[rra_idx].pdp_cnt > 1) {
+ update_cdp(rrd->cdp_prep[cdp_idx].scratch, current_cf,
+ pdp_temp[ds_idx], rra_step_cnt[rra_idx],
+ elapsed_pdp_st, start_pdp_offset,
+ rrd->rra_def[rra_idx].pdp_cnt,
+ rrd->rra_def[rra_idx].par[RRA_cdp_xff_val].u_val,
+ rra_idx, ds_idx);
+ } else {
+ /* Nothing to consolidate if there's one PDP per CDP. However, if
+ * we've missed some PDPs, let's update null counters etc. */
+ if (elapsed_pdp_st > 2) {
+ reset_cdp(rrd, elapsed_pdp_st, pdp_temp, last_seasonal_coef,
+ seasonal_coef, rra_idx, ds_idx, cdp_idx,
+ current_cf);
+ }
+ }
+
+ if (rrd_test_error())
+ return -1;
+ } /* end data sources loop */
+ return 0;
+}
+
+/*
+ * Given the new reading (pdp_temp_val), update or initialize the CDP value,
+ * primary value, secondary value, and # of unknowns.
+ */
+static void update_cdp(
+ unival *scratch,
+ int current_cf,
+ rrd_value_t pdp_temp_val,
+ unsigned long rra_step_cnt,
+ unsigned long elapsed_pdp_st,
+ unsigned long start_pdp_offset,
+ unsigned long pdp_cnt,
+ rrd_value_t xff,
+ int i,
+ int ii)
+{
+ /* shorthand variables */
+ rrd_value_t *cdp_val = &scratch[CDP_val].u_val;
+ rrd_value_t *cdp_primary_val = &scratch[CDP_primary_val].u_val;
+ rrd_value_t *cdp_secondary_val = &scratch[CDP_secondary_val].u_val;
+ unsigned long *cdp_unkn_pdp_cnt = &scratch[CDP_unkn_pdp_cnt].u_cnt;
+
+ if (rra_step_cnt) {
+ /* If we are in this block, at least 1 CDP value will be written to
+ * disk; this is the CDP_primary_val entry. If more than 1 value needs
+ * to be written, then the "fill in" value is the CDP_secondary_val
+ * entry. */
+ if (isnan(pdp_temp_val)) {
+ *cdp_unkn_pdp_cnt += start_pdp_offset;
+ *cdp_secondary_val = DNAN;
+ } else {
+ /* CDP_secondary value is the RRA "fill in" value for intermediary
+ * CDP data entries. No matter the CF, the value is the same because
+ * the average, max, min, and last of a list of identical values are
+ * all the same, namely, the value itself. */
+ *cdp_secondary_val = pdp_temp_val;
+ }
+
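+ /* xff is the fraction of a consolidation interval that may consist of
+ * unknown PDPs while the consolidated value is still considered known;
+ * exceed it and the primary value becomes unknown */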
+ if (*cdp_unkn_pdp_cnt > pdp_cnt * xff) {
+ *cdp_primary_val = DNAN;
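+ /* still start the carry-over value for the next, partially filled
+ * interval: for AVERAGE a running sum over the PDPs already in that
+ * interval, otherwise simply the last reading */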
+ if (current_cf == CF_AVERAGE) {
+ *cdp_val =
+ initialize_average_carry_over(pdp_temp_val,
+ elapsed_pdp_st,
+ start_pdp_offset, pdp_cnt);
+ } else {
+ *cdp_val = pdp_temp_val;
+ }
+ } else {
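+ /* enough of the interval is known: compute CDP_primary_val from the
+ * carried-over CDP_val and the new reading, then start the carry-over
+ * for the next interval */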
+ initialize_cdp_val(scratch, current_cf, pdp_temp_val,
+ elapsed_pdp_st, start_pdp_offset, pdp_cnt);
+ } /* endif meets xff value requirement for a valid value */
+ /* initialize the carry-over CDP_unkn_pdp_cnt; this must happen after
+ * CDP_primary_val is set because CDP_unkn_pdp_cnt is required to
+ * compute that value. */
+ if (isnan(pdp_temp_val))
+ *cdp_unkn_pdp_cnt = (elapsed_pdp_st - start_pdp_offset) % pdp_cnt;
+ else
+ *cdp_unkn_pdp_cnt = 0;
+ } else { /* rra_step_cnt[i] == 0 */
+