+ } else {
+ switch (current_cf) {
+ case CF_AVERAGE:
+ rrd.cdp_prep[iii].scratch[CDP_val].u_val += pdp_temp[ii] *
+ elapsed_pdp_st;
+ break;
+ case CF_MINIMUM:
+ if (pdp_temp[ii] < rrd.cdp_prep[iii].scratch[CDP_val].u_val)
+ rrd.cdp_prep[iii].scratch[CDP_val].u_val = pdp_temp[ii];
+ break;
+ case CF_MAXIMUM:
+ if (pdp_temp[ii] > rrd.cdp_prep[iii].scratch[CDP_val].u_val)
+ rrd.cdp_prep[iii].scratch[CDP_val].u_val = pdp_temp[ii];
+ break;
+ case CF_LAST:
+ default:
+ rrd.cdp_prep[iii].scratch[CDP_val].u_val = pdp_temp[ii];
+ break;
+ }
+ }
+ }
+ } else { /* rrd.rra_def[i].pdp_cnt == 1 */
+ if (elapsed_pdp_st > 2)
+ {
+ switch (current_cf) {
+ case CF_AVERAGE:
+ default:
+ rrd.cdp_prep[iii].scratch[CDP_primary_val].u_val=pdp_temp[ii];
+ rrd.cdp_prep[iii].scratch[CDP_secondary_val].u_val=pdp_temp[ii];
+ break;
+ case CF_SEASONAL:
+ case CF_DEVSEASONAL:
+ /* need to update cached seasonal values, so they are consistent
+ * with the bulk update */
+ /* WARNING: code relies on the fact that CDP_hw_last_seasonal and
+ * CDP_last_deviation are the same. */
+ rrd.cdp_prep[iii].scratch[CDP_hw_last_seasonal].u_val =
+ last_seasonal_coef[ii];
+ rrd.cdp_prep[iii].scratch[CDP_hw_seasonal].u_val =
+ seasonal_coef[ii];
+ break;
+ case CF_HWPREDICT:
+ /* need to update the null_count and last_null_count.
+ * even do this for non-DNAN pdp_temp because the
+ * algorithm is not learning from batch updates. */
+ rrd.cdp_prep[iii].scratch[CDP_null_count].u_cnt +=
+ elapsed_pdp_st;
+ rrd.cdp_prep[iii].scratch[CDP_last_null_count].u_cnt +=
+ elapsed_pdp_st - 1;
+ /* fall through */
+ case CF_DEVPREDICT:
+ rrd.cdp_prep[iii].scratch[CDP_primary_val].u_val = DNAN;
+ rrd.cdp_prep[iii].scratch[CDP_secondary_val].u_val = DNAN;
+ break;
+ case CF_FAILURES:
+ /* do not count missed bulk values as failures */
+ rrd.cdp_prep[iii].scratch[CDP_primary_val].u_val = 0;
+ rrd.cdp_prep[iii].scratch[CDP_secondary_val].u_val = 0;
+ /* need to reset violations buffer.
+ * could do this more carefully, but for now, just
+ * assume a bulk update wipes away all violations. */
+ erase_violations(&rrd, iii, i);
+ break;
+ }
+ }
+ } /* endif rrd.rra_def[i].pdp_cnt == 1 */
+
+ if (rrd_test_error()) break;
+
+ } /* endif data sources loop */
+ } /* end RRA Loop */
+
+ /* this loop is only entered if elapsed_pdp_st < 3 */
+ for (j = elapsed_pdp_st, scratch_idx = CDP_primary_val;
+ j > 0 && j < 3; j--, scratch_idx = CDP_secondary_val)
+ {
+ for(i = 0, rra_start = rra_begin;
+ i < rrd.stat_head->rra_cnt;
+ rra_start += rrd.rra_def[i].row_cnt * rrd.stat_head -> ds_cnt * sizeof(rrd_value_t),
+ i++)
+ {
+ if (rrd.rra_def[i].pdp_cnt > 1) continue;
+
+ current_cf = cf_conv(rrd.rra_def[i].cf_nam);
+ if (current_cf == CF_SEASONAL || current_cf == CF_DEVSEASONAL)
+ {
+ lookup_seasonal(&rrd,i,rra_start,rrd_file,
+ elapsed_pdp_st + (scratch_idx == CDP_primary_val ? 1 : 2),
+ &seasonal_coef);
+ rra_current = ftell(rrd_file);
+ }
+ if (rrd_test_error()) break;
+ /* loop over data sources within each RRA */
+ for(ii = 0;
+ ii < rrd.stat_head->ds_cnt;
+ ii++)
+ {
+ update_aberrant_CF(&rrd,pdp_temp[ii],current_cf,
+ i*(rrd.stat_head->ds_cnt) + ii,i,ii,
+ scratch_idx, seasonal_coef);
+ }
+ } /* end RRA Loop */
+ if (rrd_test_error()) break;
+ } /* end elapsed_pdp_st loop */
+
+ if (rrd_test_error()) break;
+
+ /* Ready to write to disk */
+ /* Move sequentially through the file, writing one RRA at a time.
+ * Note this architecture divorces the computation of CDP values
+ * from flushing updated RRA entries to disk. */
+ for(i = 0, rra_start = rra_begin;
+ i < rrd.stat_head->rra_cnt;
+ rra_start += rrd.rra_def[i].row_cnt * rrd.stat_head -> ds_cnt * sizeof(rrd_value_t),
+ i++) {
+ /* is there anything to write for this RRA? If not, continue. */
+ if (rra_step_cnt[i] == 0) continue;
+
+ /* write the first row */