2 * collectd - src/ceph.c
3 * Copyright (C) 2011 New Dream Network
4 * Copyright (C) 2015 Florian octo Forster
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; only version 2 of the License is applicable.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Colin McCabe <cmccabe at alumni.cmu.edu>
21 * Dennis Zou <yunzou at cisco.com>
22 * Dan Ryder <daryder at cisco.com>
23 * Florian octo Forster <octo at collectd.org>
26 #define _DEFAULT_SOURCE
34 #include <arpa/inet.h>
37 #include <yajl/yajl_parse.h>
38 #if HAVE_YAJL_YAJL_VERSION_H
39 #include <yajl/yajl_version.h>
41 #ifdef HAVE_SYS_CAPABILITY_H
42 #include <sys/capability.h>
55 #include <sys/types.h>
/* Sentinel a node handler may return to request a retry with the full
 * latency key ("<counter>.avgcount" / "<counter>.sum"); consumed in
 * ceph_cb_number. */
59 #define RETRY_AVGCOUNT -1
61 #if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
62 #define HAVE_YAJL_V2 1
65 #define RETRY_ON_EINTR(ret, expr) \
75 /** Timeout interval in seconds */
76 #define CEPH_TIMEOUT_INTERVAL 1
78 /** Maximum path length for a UNIX domain socket on this system */
79 #define UNIX_DOMAIN_SOCK_PATH_MAX (sizeof(((struct sockaddr_un *)0)->sun_path))
81 /** Yajl callback returns */
82 #define CEPH_CB_CONTINUE 1
83 #define CEPH_CB_ABORT 0
/* yajl v2 passes string/number lengths as size_t, v1 as unsigned int;
 * presumably selected by HAVE_YAJL_V2 (the #if/#else lines are not visible
 * in this excerpt — TODO confirm). */
86 typedef size_t yajl_len_t;
88 typedef unsigned int yajl_len_t;
91 /** Number of types for ceph defined in types.db */
92 #define CEPH_DSET_TYPES_NUM 3
93 /** ceph types enum */
94 enum ceph_dset_type_d {
98 DSET_TYPE_UNFOUND = 1000
101 /** Valid types for ceph defined in types.db */
102 static const char *const ceph_dset_types[CEPH_DSET_TYPES_NUM] = {
103 "ceph_latency", "ceph_bytes", "ceph_rate"};
105 /******* ceph_daemon *******/
107 /** Version of the admin_socket interface */
110 char name[DATA_MAX_NAME_LEN];
112 /** Path to the socket that we use to talk to the ceph daemon */
113 char asok_path[UNIX_DOMAIN_SOCK_PATH_MAX];
115 /** Number of counters */
117 /** Track ds types */
119 /** Track ds names to match with types */
123 * Keep track of last data for latency values so we can calculate rate
126 struct last_data **last_poll_data;
127 /** index of last poll data */
131 /******* JSON parsing *******/
/* handler(arg, value_string, key_string): invoked once per numeric JSON leaf
 * (see ceph_cb_number); returns 0 on success, RETRY_AVGCOUNT to request the
 * full latency key, or a negative error to abort parsing. */
132 typedef int (*node_handler_t)(void *, const char *, const char *);
134 /** Track state and handler while parsing JSON */
136 node_handler_t handler;
/* Stack of map keys for the currently open JSON objects; pushed in
 * ceph_cb_start_map, popped in ceph_cb_end_map. */
140 char *stack[YAJL_MAX_DEPTH];
143 typedef struct yajl_struct yajl_struct;
/* Bit flags describing a Ceph perf counter's kind; presumably mirrors the
 * flag bits in Ceph's perf-counter schema output — TODO confirm against the
 * Ceph admin-socket documentation. */
145 enum perfcounter_type_d {
146 PERFCOUNTER_LATENCY = 0x4,
147 PERFCOUNTER_DERIVE = 0x8,
150 /** Give user option to use default (long run = since daemon started) avg */
151 static int long_run_latency_avg = 0;
154 * Give user option to use default type for special cases -
155 * filestore.journal_wr_bytes is currently only metric here. Ceph reports the
156 * type as a sum/count pair and will calculate it the same as a latency value.
157 * All other "bytes" metrics (excluding the used/capacity bytes for the OSD)
158 * use the DERIVE type. Unless user specifies to use given type, convert this
159 * metric to use DERIVE.
161 static int convert_special_metrics = 1;
163 /** Array of daemons to monitor */
164 static struct ceph_daemon **g_daemons = NULL;
166 /** Number of elements in g_daemons */
167 static size_t g_num_daemons = 0;
169 /** Next expected latency metric value */
/* Latency counters arrive as an avgcount/sum(/avgtime) group; this state
 * machine tracks which member node_handler_fetch_data expects next. */
170 enum latency_loop_state {
177 * A set of data that we build up in memory while parsing the JSON.
180 /** ceph daemon we are processing data for*/
181 struct ceph_daemon *d;
182 /** track avgcount across counters for avgcount/sum latency pairs */
184 /** current index of counters - used to get type of counter */
186 /** value we expect to get next when parsing latency metric */
187 enum latency_loop_state latency_next_metric;
189 * similar to index, but current index of latency type counters -
190 * used to get last poll data of counter
194 * values list - maintain across counters since
195 * host/plugin/plugin instance are always the same
201 * A set of count/sum pairs to keep track of latency types and get difference
202 * between this poll data and last poll data.
205 char ds_name[DATA_MAX_NAME_LEN];
210 /******* network I/O *******/
/* Connection state machine: unconnected -> write request -> read version /
 * read length ("amt") -> read JSON payload; driven by cconn_handle_event. */
212 CSTATE_UNCONNECTED = 0,
213 CSTATE_WRITE_REQUEST,
/* Request codes sent to the admin socket; ASOK_REQ_NONE marks a connection
 * whose request has already been serviced (see cconn_prepare). */
219 enum request_type_t {
220 ASOK_REQ_VERSION = 0,
223 ASOK_REQ_NONE = 1000,
227 /** The Ceph daemon that we're talking to */
228 struct ceph_daemon *d;
231 uint32_t request_type;
233 /** The connection state */
236 /** The socket we use to talk to this daemon */
239 /** The amount of data remaining to read / write. */
242 /** Length of the JSON to read */
245 /** Buffer containing JSON data */
248 /** Keep data important to yajl processing */
249 struct yajl_struct yajl;
252 static int ceph_cb_null(void *ctx) { return CEPH_CB_CONTINUE; }
254 static int ceph_cb_boolean(void *ctx, int bool_val) { return CEPH_CB_CONTINUE; }
/* Append string `src` to fixed-size char array `dest`, truncating as needed
 * and always NUL-terminating. `dest` must be an actual array (sizeof is taken
 * of it), not a pointer. */
256 #define BUFFER_ADD(dest, src) \
258 size_t dest_size = sizeof(dest); \
259 size_t dest_len = strlen(dest); \
260 if (dest_size > dest_len) { \
261 sstrncpy((dest) + dest_len, (src), dest_size - dest_len); \
263 (dest)[dest_size - 1] = 0; \
/** yajl callback for numeric JSON leaves. Builds the dotted key from the map
 *  key stack, special-cases the avgcount/sum/avgtime members of latency
 *  counters, and forwards value+key to the configured node handler. Returns
 *  CEPH_CB_CONTINUE on success, CEPH_CB_ABORT on handler failure. */
266 static int ceph_cb_number(void *ctx, const char *number_val,
267 yajl_len_t number_len) {
268 yajl_struct *state = (yajl_struct *)ctx;
/* VLA copy of the (non-NUL-terminated) number text yajl hands us. */
269 char buffer[number_len + 1];
270 char key[2 * DATA_MAX_NAME_LEN] = {0};
271 _Bool latency_type = 0;
274 memcpy(buffer, number_val, number_len);
275 buffer[sizeof(buffer) - 1] = '\0';
/* Join all enclosing map keys with '.' to form the counter key. */
277 for (size_t i = 0; i < state->depth; i++) {
278 if (state->stack[i] == NULL)
281 if (strlen(key) != 0)
282 BUFFER_ADD(key, ".");
283 BUFFER_ADD(key, state->stack[i]);
286 /* Special case for latency metrics. */
287 if ((strcmp("avgcount", state->key) == 0) ||
288 (strcmp("sum", state->key) == 0) ||
289 (strcmp("avgtime",state->key) == 0)) {
292 /* depth >= 2 => (stack[-1] != NULL && stack[-2] != NULL) */
293 assert((state->depth < 2) || ((state->stack[state->depth - 1] != NULL) &&
294 (state->stack[state->depth - 2] != NULL)));
296 /* Super-special case for filestore.journal_wr_bytes.avgcount: For
297 * some reason, Ceph schema encodes this as a count/sum pair while all
298 * other "Bytes" data (excluding used/capacity bytes for OSD space) uses
299 * a single "Derive" type. To spare further confusion, keep this KPI as
300 * the same type of other "Bytes". Instead of keeping an "average" or
301 * "rate", use the "sum" in the pair and assign that to the derive
303 if (convert_special_metrics && (state->depth >= 2) &&
304 (strcmp("filestore", state->stack[state->depth - 2]) == 0) &&
305 (strcmp("journal_wr_bytes", state->stack[state->depth - 1]) == 0) &&
306 (strcmp("avgcount", state->key) == 0)) {
307 DEBUG("ceph plugin: Skipping avgcount for filestore.JournalWrBytes");
308 return CEPH_CB_CONTINUE;
310 } else /* not a latency type */
/* Non-latency counters get the leaf key appended immediately; latency
 * counters are first tried without it (the handler may ask for a retry
 * via RETRY_AVGCOUNT below). */
312 BUFFER_ADD(key, ".");
313 BUFFER_ADD(key, state->key);
316 status = state->handler(state->handler_arg, buffer, key);
317 if ((status == RETRY_AVGCOUNT) && latency_type) {
318 /* Add previously skipped part of the key, either "avgcount" or "sum",
320 BUFFER_ADD(key, ".");
321 BUFFER_ADD(key, state->key);
323 status = state->handler(state->handler_arg, buffer, key);
327 ERROR("ceph plugin: JSON handler failed with status %d.", status);
328 return CEPH_CB_ABORT;
331 return CEPH_CB_CONTINUE;
/** yajl callback for JSON strings: strings in the perf dump carry no numeric
 *  data, so they are ignored. */
334 static int ceph_cb_string(void *ctx, const unsigned char *string_val,
335 yajl_len_t string_len) {
336 return CEPH_CB_CONTINUE;
/** yajl callback for '{': pushes the current map key onto the key stack so
 *  nested counter names can be joined into dotted keys. Aborts when nesting
 *  exceeds YAJL_MAX_DEPTH. */
339 static int ceph_cb_start_map(void *ctx) {
340 yajl_struct *state = (yajl_struct *)ctx;
342 /* Push key to the stack */
343 if (state->depth == YAJL_MAX_DEPTH)
344 return CEPH_CB_ABORT;
346 state->stack[state->depth] = state->key;
/* NOTE(review): the depth increment / key reset lines are not visible in
 * this excerpt — confirm against the full source. */
350 return CEPH_CB_CONTINUE;
/** yajl callback for '}': pops the top map key from the stack, restoring the
 *  parent's key. Aborts on unbalanced braces (depth underflow). */
353 static int ceph_cb_end_map(void *ctx) {
354 yajl_struct *state = (yajl_struct *)ctx;
356 /* Pop key from the stack */
357 if (state->depth == 0)
358 return CEPH_CB_ABORT;
/* NOTE(review): depth is presumably decremented in lines not shown here
 * before these indexed accesses — confirm against the full source. */
362 state->key = state->stack[state->depth];
363 state->stack[state->depth] = NULL;
365 return CEPH_CB_CONTINUE;
/** yajl callback for a map key: copies the (non-NUL-terminated) key bytes
 *  into a freshly allocated NUL-terminated string stored in state->key.
 *  Aborts on allocation failure. */
368 static int ceph_cb_map_key(void *ctx, const unsigned char *key,
369 yajl_len_t string_len) {
370 yajl_struct *state = (yajl_struct *)ctx;
371 size_t sz = ((size_t)string_len) + 1;
374 state->key = malloc(sz);
375 if (state->key == NULL) {
376 ERROR("ceph plugin: malloc failed.");
377 return CEPH_CB_ABORT;
380 memmove(state->key, key, sz - 1);
381 state->key[sz - 1] = 0;
383 return CEPH_CB_CONTINUE;
386 static int ceph_cb_start_array(void *ctx) { return CEPH_CB_CONTINUE; }
388 static int ceph_cb_end_array(void *ctx) { return CEPH_CB_CONTINUE; }
/* Callback table handed to yajl_alloc; only the first entry is visible in
 * this excerpt. */
390 static yajl_callbacks callbacks = {ceph_cb_null,
/** Log one daemon's name and admin-socket path (debug builds only). */
402 static void ceph_daemon_print(const struct ceph_daemon *d) {
403 DEBUG("ceph plugin: name=%s, asok_path=%s", d->name, d->asok_path);
/** Log every configured daemon via ceph_daemon_print. */
406 static void ceph_daemons_print(void) {
407 for (size_t i = 0; i < g_num_daemons; ++i) {
408 ceph_daemon_print(g_daemons[i]);
/** Free a daemon's owned allocations: per-counter last-poll entries, the
 *  last-poll array itself, and the counter-name strings. */
412 static void ceph_daemon_free(struct ceph_daemon *d) {
413 for (int i = 0; i < d->last_idx; i++) {
414 sfree(d->last_poll_data[i]);
416 sfree(d->last_poll_data);
417 d->last_poll_data = NULL;
420 for (int i = 0; i < d->ds_num; i++) {
421 sfree(d->ds_names[i]);
428 /* compact_ds_name removes the special characters ":", "_", "-" and "+" from the
429 * input string. Characters following these special characters are capitalized.
430 * Trailing "+" and "-" characters are replaced with the strings "Plus" and
/* Returns 0 on success, non-zero on invalid arguments or overflow. */
432 static int compact_ds_name(char *buffer, size_t buffer_size, char const *src) {
436 size_t ptr_size = buffer_size;
437 _Bool append_plus = 0;
438 _Bool append_minus = 0;
/* buffer must be able to hold at least the "Minus" suffix. */
440 if ((buffer == NULL) || (buffer_size <= strlen("Minus")) || (src == NULL))
/* NOTE(review): strdup result is not checked for NULL in the visible lines
 * — confirm against the full source. */
443 src_copy = strdup(src);
444 src_len = strlen(src);
446 /* Remove trailing "+" and "-". */
447 if (src_copy[src_len - 1] == '+') {
450 src_copy[src_len] = 0;
451 } else if (src_copy[src_len - 1] == '-') {
454 src_copy[src_len] = 0;
457 /* Split at special chars, capitalize first character, append to buffer. */
458 char *dummy = src_copy;
460 char *save_ptr = NULL;
461 while ((token = strtok_r(dummy, ":_-+", &save_ptr)) != NULL) {
466 token[0] = toupper((int)token[0]);
468 assert(ptr_size > 1);
475 assert(len < ptr_size);
477 sstrncpy(ptr, token, len + 1);
486 /* Append "Plus" or "Minus" if "+" or "-" has been stripped above. */
487 if (append_plus || append_minus) {
488 char const *append = "Plus";
/* Overwrite the tail of the buffer if the name would not otherwise fit. */
492 size_t offset = buffer_size - (strlen(append) + 1);
493 if (offset > strlen(buffer))
494 offset = strlen(buffer);
496 sstrncpy(buffer + offset, append, buffer_size - offset);
/** Returns 1 if `str` ends with `suffix`, 0 otherwise. */
503 static _Bool has_suffix(char const *str, char const *suffix) {
504 size_t str_len = strlen(str);
505 size_t suffix_len = strlen(suffix);
508 if (suffix_len > str_len)
510 offset = str_len - suffix_len;
512 if (strcmp(str + offset, suffix) == 0)
518 /* count_parts returns the number of elements a "foo.bar.baz" style key has. */
519 static size_t count_parts(char const *key) {
520 size_t parts_num = 0;
/* Each '.' (plus the initial segment) contributes one part. */
522 for (const char *ptr = key; ptr != NULL; ptr = strchr(ptr + 1, '.'))
529 * Parse key to remove "type" if this is for schema and initiate compaction
/* Strips a trailing ".type" (schema keys only, when there are >2 parts),
 * then compacts the result into `buffer` via compact_ds_name. Returns the
 * compact_ds_name status; non-zero on bad input or overflow. */
531 static int parse_keys(char *buffer, size_t buffer_size, const char *key_str) {
532 char tmp[2 * buffer_size];
534 if (buffer == NULL || buffer_size == 0 || key_str == NULL ||
535 strlen(key_str) == 0)
538 if ((count_parts(key_str) > 2) && has_suffix(key_str, ".type")) {
539 /* strip ".type" suffix iff the key has more than two parts. */
540 size_t sz = strlen(key_str) - strlen(".type") + 1;
542 if (sz > sizeof(tmp))
544 sstrncpy(tmp, key_str, sz);
546 sstrncpy(tmp, key_str, sizeof(tmp));
549 return compact_ds_name(buffer, buffer_size, tmp);
553 * while parsing ceph admin socket schema, save counter name and type for later
/* Grows the daemon's parallel ds_names/ds_types arrays by one entry, mapping
 * the Ceph perfcounter flag bits to a collectd dataset type. Returns 0 on
 * success; non-zero on allocation or key-parsing failure. */
556 static int ceph_daemon_add_ds_entry(struct ceph_daemon *d, const char *name,
559 char ds_name[DATA_MAX_NAME_LEN];
561 if (convert_special_metrics) {
563 * Special case for filestore:JournalWrBytes. For some reason, Ceph
564 * schema encodes this as a count/sum pair while all other "Bytes" data
565 * (excluding used/capacity bytes for OSD space) uses a single "Derive"
566 * type. To spare further confusion, keep this KPI as the same type of
567 * other "Bytes". Instead of keeping an "average" or "rate", use the
568 * "sum" in the pair and assign that to the derive value.
570 if ((strcmp(name, "filestore.journal_wr_bytes.type") == 0)) {
/* NOTE(review): the realloc results are presumably checked in lines not
 * visible here; as written, assigning realloc's result directly would leak
 * the old array on failure — confirm against the full source. */
575 d->ds_names = realloc(d->ds_names, sizeof(char *) * (d->ds_num + 1));
580 d->ds_types = realloc(d->ds_types, sizeof(uint32_t) * (d->ds_num + 1));
585 d->ds_names[d->ds_num] = malloc(DATA_MAX_NAME_LEN);
586 if (!d->ds_names[d->ds_num]) {
/* DERIVE takes precedence over LATENCY; anything else is "bytes". */
590 type = (pc_type & PERFCOUNTER_DERIVE)
592 : ((pc_type & PERFCOUNTER_LATENCY) ? DSET_LATENCY : DSET_BYTES);
593 d->ds_types[d->ds_num] = type;
595 if (parse_keys(ds_name, sizeof(ds_name), name)) {
599 sstrncpy(d->ds_names[d->ds_num], ds_name, DATA_MAX_NAME_LEN - 1);
600 d->ds_num = (d->ds_num + 1);
605 /******* ceph_config *******/
/* Copies a config item's single string value into `dest`. Returns 0 on
 * success, -ENAMETOOLONG on truncation, non-zero on type/arity errors. */
606 static int cc_handle_str(struct oconfig_item_s *item, char *dest,
609 if (item->values_num != 1) {
612 if (item->values[0].type != OCONFIG_TYPE_STRING) {
615 val = item->values[0].value.string;
616 if (snprintf(dest, dest_len, "%s", val) > (dest_len - 1)) {
617 ERROR("ceph plugin: configuration parameter '%s' is too long.\n",
619 return -ENAMETOOLONG;
/* Copies a config item's single boolean value into `dest` as 0/1. */
624 static int cc_handle_bool(struct oconfig_item_s *item, int *dest) {
625 if (item->values_num != 1) {
629 if (item->values[0].type != OCONFIG_TYPE_BOOLEAN) {
633 *dest = (item->values[0].value.boolean) ? 1 : 0;
/* Parses one <Daemon "name"> config block: validates the name and admin
 * socket path, then appends a heap-allocated copy to g_daemons. Returns 0 on
 * success; negative for configuration errors, positive for runtime (OOM)
 * errors (see ceph_config's handling). */
637 static int cc_add_daemon_config(oconfig_item_t *ci) {
639 struct ceph_daemon *nd, cd = {0};
640 struct ceph_daemon **tmp;
642 if ((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING)) {
643 WARNING("ceph plugin: `Daemon' blocks need exactly one string "
/* The block's own string argument is the daemon name. */
648 ret = cc_handle_str(ci, cd.name, DATA_MAX_NAME_LEN);
653 for (int i = 0; i < ci->children_num; i++) {
654 oconfig_item_t *child = ci->children + i;
656 if (strcasecmp("SocketPath", child->key) == 0) {
657 ret = cc_handle_str(child, cd.asok_path, sizeof(cd.asok_path));
662 WARNING("ceph plugin: ignoring unknown option %s", child->key);
665 if (cd.name[0] == '\0') {
666 ERROR("ceph plugin: you must configure a daemon name.\n");
668 } else if (cd.asok_path[0] == '\0') {
669 ERROR("ceph plugin(name=%s): you must configure an administrative "
673 } else if (!((cd.asok_path[0] == '/') ||
674 (cd.asok_path[0] == '.' && cd.asok_path[1] == '/'))) {
675 ERROR("ceph plugin(name=%s): administrative socket paths must begin "
676 "with '/' or './' Can't parse: '%s'\n",
677 cd.name, cd.asok_path);
681 tmp = realloc(g_daemons, (g_num_daemons + 1) * sizeof(*g_daemons));
683 /* The positive return value here indicates that this is a
684 * runtime error, not a configuration error. */
689 nd = malloc(sizeof(*nd));
693 memcpy(nd, &cd, sizeof(*nd));
694 g_daemons[g_num_daemons] = nd;
/* Top-level complex-config callback: dispatches <Daemon> blocks and the
 * LongRunAvgLatency / ConvertSpecialMetricTypes boolean options. */
699 static int ceph_config(oconfig_item_t *ci) {
702 for (int i = 0; i < ci->children_num; ++i) {
703 oconfig_item_t *child = ci->children + i;
704 if (strcasecmp("Daemon", child->key) == 0) {
705 ret = cc_add_daemon_config(child);
/* Positive return from cc_add_daemon_config means OOM (runtime error). */
707 ERROR("ceph plugin: Couldn't allocate memory");
710 // process other daemons and ignore this one
713 } else if (strcasecmp("LongRunAvgLatency", child->key) == 0) {
714 ret = cc_handle_bool(child, &long_run_latency_avg);
718 } else if (strcasecmp("ConvertSpecialMetricTypes", child->key) == 0) {
719 ret = cc_handle_bool(child, &convert_special_metrics);
724 WARNING("ceph plugin: ignoring unknown option %s", child->key);
731 * Parse JSON and get error message if present
/* Feeds `json_len` bytes into the yajl handle; on parse error, logs yajl's
 * diagnostic and frees it. */
733 static int traverse_json(const unsigned char *json, uint32_t json_len,
735 yajl_status status = yajl_parse(hand, json, json_len);
739 case yajl_status_error:
740 msg = yajl_get_error(hand, /* verbose = */ 1,
741 /* jsonText = */ (unsigned char *)json,
742 (unsigned int)json_len);
743 ERROR("ceph plugin: yajl_parse failed: %s", msg);
744 yajl_free_error(hand, msg);
746 case yajl_status_client_canceled:
754 * Add entry for each counter while parsing schema
/* node_handler_t used while parsing the schema reply: registers every
 * counter's name/type with the daemon. The numeric value is the Ceph
 * perfcounter flag bits. */
756 static int node_handler_define_schema(void *arg, const char *val,
758 struct ceph_daemon *d = (struct ceph_daemon *)arg;
761 return ceph_daemon_add_ds_entry(d, key, pc_type);
765 * Latency counter does not yet have an entry in last poll data - add it.
/* Allocates a new last_data entry at d->last_idx, stores the current
 * sum/count pair, and advances last_idx. Caller must have grown
 * d->last_poll_data first (see update_last). */
767 static int add_last(struct ceph_daemon *d, const char *ds_n, double cur_sum,
768 uint64_t cur_count) {
769 d->last_poll_data[d->last_idx] =
770 malloc(sizeof(*d->last_poll_data[d->last_idx]));
771 if (!d->last_poll_data[d->last_idx]) {
774 sstrncpy(d->last_poll_data[d->last_idx]->ds_name, ds_n,
775 sizeof(d->last_poll_data[d->last_idx]->ds_name));
776 d->last_poll_data[d->last_idx]->last_sum = cur_sum;
777 d->last_poll_data[d->last_idx]->last_count = cur_count;
778 d->last_idx = (d->last_idx + 1);
783 * Update latency counter or add new entry if it doesn't exist
/* If `index` already holds this counter's entry, overwrite it in place;
 * otherwise grow the last_poll_data array and append via add_last.
 * Returns 0 on success, -ENOMEM on allocation failure. */
785 static int update_last(struct ceph_daemon *d, const char *ds_n, int index,
786 double cur_sum, uint64_t cur_count) {
787 if ((d->last_idx > index) &&
788 (strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0)) {
789 d->last_poll_data[index]->last_sum = cur_sum;
790 d->last_poll_data[index]->last_count = cur_count;
/* First latency counter ever seen for this daemon: create the array. */
794 if (!d->last_poll_data) {
795 d->last_poll_data = malloc(sizeof(*d->last_poll_data));
796 if (!d->last_poll_data) {
800 struct last_data **tmp_last = realloc(
801 d->last_poll_data, ((d->last_idx + 1) * sizeof(struct last_data *)));
805 d->last_poll_data = tmp_last;
807 return add_last(d, ds_n, cur_sum, cur_count);
811 * If using index guess failed (shouldn't happen, but possible if counters
812 * get rearranged), resort to searching for counter name
/* Linear scan of last_poll_data by counter name. */
814 static int backup_search_for_last_avg(struct ceph_daemon *d, const char *ds_n) {
815 for (int i = 0; i < d->last_idx; i++) {
816 if (strcmp(d->last_poll_data[i]->ds_name, ds_n) == 0) {
824 * Calculate average b/t current data and last poll data
825 * if last poll data exists
/* Tries `index`, then index-1, then a name search to locate this counter's
 * previous sum/count; returns (sum delta / count delta), or -ENOMEM if
 * recording the new last-poll data fails. The -1.1 sentinel marks "no usable
 * delta" (first poll or non-increasing count). */
827 static double get_last_avg(struct ceph_daemon *d, const char *ds_n, int index,
828 double cur_sum, uint64_t cur_count) {
829 double result = -1.1, sum_delt = 0.0;
830 uint64_t count_delt = 0;
832 if (d->last_idx > index) {
833 if (strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0) {
836 // test previous index
837 else if ((index > 0) &&
838 (strcmp(d->last_poll_data[index - 1]->ds_name, ds_n) == 0)) {
839 tmp_index = (index - 1);
841 tmp_index = backup_search_for_last_avg(d, ds_n);
/* Only compute a rate when the count actually increased since last poll. */
844 if ((tmp_index > -1) &&
845 (cur_count > d->last_poll_data[tmp_index]->last_count)) {
846 sum_delt = (cur_sum - d->last_poll_data[tmp_index]->last_sum);
847 count_delt = (cur_count - d->last_poll_data[tmp_index]->last_count);
848 result = (sum_delt / count_delt);
/* NOTE(review): exact float comparison against the -1.1 sentinel — fragile
 * in general, but here result is either untouched or assigned a quotient. */
852 if (result == -1.1) {
855 if (update_last(d, ds_n, tmp_index, cur_sum, cur_count) == -ENOMEM) {
862 * If using index guess failed, resort to searching for counter name
/* Linear scan of the schema's counter names; DSET_TYPE_UNFOUND if absent. */
864 static uint32_t backup_search_for_type(struct ceph_daemon *d, char *ds_name) {
865 for (int i = 0; i < d->ds_num; i++) {
866 if (strcmp(d->ds_names[i], ds_name) == 0) {
867 return d->ds_types[i];
870 return DSET_TYPE_UNFOUND;
874 * Process counter data and dispatch values
/* node_handler_t used while parsing a counter-data reply: resolves each
 * counter's type (index guess, then name search), drives the latency
 * avgcount/sum/avgtime state machine, and dispatches one value per counter
 * to collectd. Returns RETRY_AVGCOUNT to request the full latency key,
 * -ENOMEM on allocation failure in the latency path. */
876 static int node_handler_fetch_data(void *arg, const char *val,
881 struct values_tmp *vtmp = (struct values_tmp *)arg;
882 uint32_t type = DSET_TYPE_UNFOUND;
883 int index = vtmp->index;
885 char ds_name[DATA_MAX_NAME_LEN];
887 if (parse_keys(ds_name, sizeof(ds_name), key)) {
891 if (index >= vtmp->d->ds_num) {
892 // don't overflow bounds of array
893 index = (vtmp->d->ds_num - 1);
897 * counters should remain in same order we parsed schema... we maintain the
898 * index variable to keep track of current point in list of counters. first
899 * use index to guess point in array for retrieving type. if that doesn't
900 * work, use the old way to get the counter type
902 if (strcmp(ds_name, vtmp->d->ds_names[index]) == 0) {
904 type = vtmp->d->ds_types[index];
905 } else if ((index > 0) &&
906 (strcmp(ds_name, vtmp->d->ds_names[index - 1]) == 0)) {
908 type = vtmp->d->ds_types[index - 1];
911 if (type == DSET_TYPE_UNFOUND) {
912 // couldn't find right type by guessing, check the old way
913 type = backup_search_for_type(vtmp->d, ds_name);
/* Latency counters arrive as an avgcount/sum(/avgtime) group; only the
 * final member of the group produces a dispatched value. */
918 if (vtmp->latency_next_metric == CEPH_AVGCOUNT) {
919 sscanf(val, "%" PRIu64, &vtmp->avgcount);
920 vtmp->latency_next_metric = CEPH_SUM;
921 // return after saving avgcount - don't dispatch value
922 // until latency calculation
924 } else if (vtmp->latency_next_metric == CEPH_SUM) {
925 if (vtmp->avgcount == 0) {
928 vtmp->latency_next_metric = CEPH_AVGTIME;
929 // user wants latency values as long run avg
931 if (long_run_latency_avg) {
935 sscanf(val, "%lf", &sum);
936 result = get_last_avg(vtmp->d, ds_name, vtmp->latency_index, sum,
938 if (result == -ENOMEM) {
942 vtmp->latency_index = (vtmp->latency_index + 1);
943 } else if (vtmp->latency_next_metric == CEPH_AVGTIME) {
944 vtmp->latency_next_metric = CEPH_AVGCOUNT;
945 // skip this step if no need in long run latency
946 if (!long_run_latency_avg) {
950 sscanf(val, "%lf", &result);
952 vtmp->latency_index = (vtmp->latency_index + 1);
/* NOTE(review): sscanf return values are not checked in the visible lines;
 * malformed numbers would leave tmp_d/tmp_u unset — confirm full source. */
956 sscanf(val, "%lf", &tmp_d);
960 sscanf(val, "%" PRIu64, &tmp_u);
963 case DSET_TYPE_UNFOUND:
965 ERROR("ceph plugin: ds %s was not properly initialized.", ds_name);
969 sstrncpy(vtmp->vlist.type, ceph_dset_types[type], sizeof(vtmp->vlist.type));
970 sstrncpy(vtmp->vlist.type_instance, ds_name,
971 sizeof(vtmp->vlist.type_instance));
972 vtmp->vlist.values = &uv;
973 vtmp->vlist.values_len = 1;
975 vtmp->index = (vtmp->index + 1);
976 plugin_dispatch_values(&vtmp->vlist);
/** Open a non-blocking UNIX-domain stream socket to the daemon's admin
 *  socket and move the connection into CSTATE_WRITE_REQUEST. Must only be
 *  called from CSTATE_UNCONNECTED. */
981 static int cconn_connect(struct cconn *io) {
982 struct sockaddr_un address = {0};
984 if (io->state != CSTATE_UNCONNECTED) {
985 ERROR("ceph plugin: cconn_connect: io->state != CSTATE_UNCONNECTED");
988 fd = socket(PF_UNIX, SOCK_STREAM, 0);
991 ERROR("ceph plugin: cconn_connect: socket(PF_UNIX, SOCK_STREAM, 0) "
996 address.sun_family = AF_UNIX;
997 snprintf(address.sun_path, sizeof(address.sun_path), "%s", io->d->asok_path);
998 RETRY_ON_EINTR(err, connect(fd, (struct sockaddr *)&address,
999 sizeof(struct sockaddr_un)));
1001 ERROR("ceph plugin: cconn_connect: connect(%d) failed: error %d", fd, err);
/* Switch to non-blocking so the poll()-driven state machine never stalls. */
1006 flags = fcntl(fd, F_GETFL, 0);
1007 if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0) {
1009 ERROR("ceph plugin: cconn_connect: fcntl(%d, O_NONBLOCK) error %d", fd,
1015 io->state = CSTATE_WRITE_REQUEST;
/** Reset a connection to CSTATE_UNCONNECTED, closing its socket if open. */
1022 static void cconn_close(struct cconn *io) {
1023 io->state = CSTATE_UNCONNECTED;
1024 if (io->asok != -1) {
1026 RETRY_ON_EINTR(res, close(io->asok));
1035 /* Process incoming JSON counter data */
/* Allocates the per-poll values_tmp scratch (daemon, value_list template,
 * latency state machine reset), wires it up as the yajl handler argument,
 * and walks the JSON buffer. */
1036 static int cconn_process_data(struct cconn *io, yajl_struct *yajl,
1039 struct values_tmp *vtmp = calloc(1, sizeof(struct values_tmp) * 1);
1044 vtmp->vlist = (value_list_t)VALUE_LIST_INIT;
1045 sstrncpy(vtmp->vlist.plugin, "ceph", sizeof(vtmp->vlist.plugin));
1046 sstrncpy(vtmp->vlist.plugin_instance, io->d->name,
1047 sizeof(vtmp->vlist.plugin_instance));
/* First latency member expected is avgcount. */
1050 vtmp->latency_next_metric = CEPH_AVGCOUNT;
1051 vtmp->latency_index = 0;
1053 yajl->handler_arg = vtmp;
1054 ret = traverse_json(io->json, io->json_len, hand);
1060 * Initiate JSON parsing and print error if one occurs
/* Allocates a yajl handle bound to this connection's parser state, selects
 * the node handler by request type (counter data vs. schema), parses, and
 * finalizes the parse — logging yajl's diagnostic on failure. The two
 * yajl_alloc / complete calls cover the v1 and v2 APIs (preprocessor
 * conditionals not visible in this excerpt). */
1062 static int cconn_process_json(struct cconn *io) {
1063 if ((io->request_type != ASOK_REQ_DATA) &&
1064 (io->request_type != ASOK_REQ_SCHEMA)) {
1072 hand = yajl_alloc(&callbacks,
1074 /* alloc funcs = */ NULL,
1076 /* alloc funcs = */ NULL, NULL,
1078 /* context = */ (void *)(&io->yajl));
1081 ERROR("ceph plugin: yajl_alloc failed.");
1087 switch (io->request_type) {
1089 io->yajl.handler = node_handler_fetch_data;
1090 result = cconn_process_data(io, &io->yajl, hand);
1092 case ASOK_REQ_SCHEMA:
1093 // init daemon specific variables
1095 io->d->last_idx = 0;
1096 io->d->last_poll_data = NULL;
1097 io->yajl.handler = node_handler_define_schema;
1098 io->yajl.handler_arg = io->d;
1099 result = traverse_json(io->json, io->json_len, hand);
1108 status = yajl_complete_parse(hand);
1110 status = yajl_parse_complete(hand);
1113 if (status != yajl_status_ok) {
1114 unsigned char *errmsg =
1115 yajl_get_error(hand, /* verbose = */ 0,
1116 /* jsonText = */ NULL, /* jsonTextLen = */ 0);
1117 ERROR("ceph plugin: yajl_parse_complete failed: %s", (char *)errmsg);
1118 yajl_free_error(hand, errmsg);
/** Check that poll(2) revents match what the connection's state expects:
 *  POLLOUT while writing the request, POLLIN while reading; POLLERR is
 *  always an error. Returns 0 when consistent, negative errno otherwise. */
1128 static int cconn_validate_revents(struct cconn *io, int revents) {
1129 if (revents & POLLERR) {
1130 ERROR("ceph plugin: cconn_validate_revents(name=%s): got POLLERR",
1134 switch (io->state) {
1135 case CSTATE_WRITE_REQUEST:
1136 return (revents & POLLOUT) ? 0 : -EINVAL;
1137 case CSTATE_READ_VERSION:
1138 case CSTATE_READ_AMT:
1139 case CSTATE_READ_JSON:
1140 return (revents & POLLIN) ? 0 : -EINVAL;
1142 ERROR("ceph plugin: cconn_validate_revents(name=%s) got to "
1143 "illegal state on line %d",
1144 io->d->name, __LINE__);
1149 /** Handle a network event for a connection */
/* One step of the admin-socket protocol state machine. Wire format: we write
 * a JSON command containing the request type; the daemon replies with a
 * 32-bit big-endian version (first request only), then a 32-bit big-endian
 * payload length, then the JSON payload. io->amt tracks partial reads/writes
 * across poll() wakeups. */
1150 static int cconn_handle_event(struct cconn *io) {
1152 switch (io->state) {
1153 case CSTATE_UNCONNECTED:
1154 ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
1156 io->d->name, __LINE__);
1159 case CSTATE_WRITE_REQUEST: {
1161 snprintf(cmd, sizeof(cmd), "%s%d%s", "{ \"prefix\": \"", io->request_type,
1163 size_t cmd_len = strlen(cmd);
1165 ret, write(io->asok, ((char *)&cmd) + io->amt, cmd_len - io->amt));
1166 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,amt=%d,ret=%d)",
1167 io->d->name, io->state, io->amt, ret);
/* Entire command written: advance to the appropriate read state. */
1172 if (io->amt >= cmd_len) {
1174 switch (io->request_type) {
1175 case ASOK_REQ_VERSION:
1176 io->state = CSTATE_READ_VERSION;
1179 io->state = CSTATE_READ_AMT;
1185 case CSTATE_READ_VERSION: {
1186 RETRY_ON_EINTR(ret, read(io->asok, ((char *)(&io->d->version)) + io->amt,
1187 sizeof(io->d->version) - io->amt));
1188 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1189 io->d->name, io->state, ret);
1194 if (io->amt >= sizeof(io->d->version)) {
/* Version arrives big-endian; only protocol version 1 is supported. */
1195 io->d->version = ntohl(io->d->version);
1196 if (io->d->version != 1) {
1197 ERROR("ceph plugin: cconn_handle_event(name=%s) not "
1198 "expecting version %d!",
1199 io->d->name, io->d->version);
1202 DEBUG("ceph plugin: cconn_handle_event(name=%s): identified as "
1204 io->d->name, io->d->version);
/* Handshake done: next request on this connection fetches the schema. */
1207 io->request_type = ASOK_REQ_SCHEMA;
1211 case CSTATE_READ_AMT: {
1212 RETRY_ON_EINTR(ret, read(io->asok, ((char *)(&io->json_len)) + io->amt,
1213 sizeof(io->json_len) - io->amt));
1214 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1215 io->d->name, io->state, ret);
1220 if (io->amt >= sizeof(io->json_len)) {
1221 io->json_len = ntohl(io->json_len);
1223 io->state = CSTATE_READ_JSON;
/* +1 so the payload is always NUL-terminated for the parser. */
1224 io->json = calloc(1, io->json_len + 1);
1226 ERROR("ceph plugin: error callocing io->json");
1232 case CSTATE_READ_JSON: {
1234 read(io->asok, io->json + io->amt, io->json_len - io->amt));
1235 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1236 io->d->name, io->state, ret);
1241 if (io->amt >= io->json_len) {
1242 ret = cconn_process_json(io);
/* Request fully serviced; cconn_prepare will skip this connection. */
1247 io->request_type = ASOK_REQ_NONE;
1252 ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
1254 io->d->name, __LINE__);
/** Fill in the pollfd for a connection, connecting first if necessary.
 *  Returns 1 when the fd should be polled, 0 when the connection needs no
 *  I/O (already serviced, or no counters to fetch), negative on error. */
1259 static int cconn_prepare(struct cconn *io, struct pollfd *fds) {
1261 if (io->request_type == ASOK_REQ_NONE) {
1262 /* The request has already been serviced. */
1264 } else if ((io->request_type == ASOK_REQ_DATA) && (io->d->ds_num == 0)) {
1265 /* If there are no counters to report on, don't bother
1270 switch (io->state) {
1271 case CSTATE_UNCONNECTED:
1272 ret = cconn_connect(io);
1275 } else if (ret < 0) {
1279 fds->events = POLLOUT;
1281 case CSTATE_WRITE_REQUEST:
1283 fds->events = POLLOUT;
1285 case CSTATE_READ_VERSION:
1286 case CSTATE_READ_AMT:
1287 case CSTATE_READ_JSON:
1289 fds->events = POLLIN;
1292 ERROR("ceph plugin: cconn_prepare(name=%s) got to illegal state "
1294 io->d->name, __LINE__);
1299 /** Returns the difference between two struct timevals in milliseconds.
1300 * On overflow, we return max/min int.
/* Computes t1 - t2; result is clamped to [INT_MIN, INT_MAX]. The
 * declaration of `ret` (presumably a wider integer type, so the clamp is
 * meaningful) is not visible in this excerpt — TODO confirm. */
1302 static int milli_diff(const struct timeval *t1, const struct timeval *t2) {
1304 int sec_diff = t1->tv_sec - t2->tv_sec;
1305 int usec_diff = t1->tv_usec - t2->tv_usec;
1306 ret = usec_diff / 1000;
1307 ret += (sec_diff * 1000);
1308 return (ret > INT_MAX) ? INT_MAX : ((ret < INT_MIN) ? INT_MIN : (int)ret);
1311 /** This handles the actual network I/O to talk to the Ceph daemons.
/* poll(2)-driven event loop: issues `request_type` to every configured
 * daemon concurrently, with a CEPH_TIMEOUT_INTERVAL deadline. Per-daemon
 * failures are logged and the daemon is skipped (some_unreachable) rather
 * than failing the whole loop. All connections are closed before return. */
1313 static int cconn_main_loop(uint32_t request_type) {
1314 int ret, some_unreachable = 0;
1315 struct timeval end_tv;
/* VLAs sized by g_num_daemons; guarded below against the zero-daemon case. */
1316 struct cconn io_array[g_num_daemons];
1318 DEBUG("ceph plugin: entering cconn_main_loop(request_type = %" PRIu32 ")",
1321 if (g_num_daemons < 1) {
1322 ERROR("ceph plugin: No daemons configured. See the \"Daemon\" config "
1327 /* create cconn array */
1328 for (size_t i = 0; i < g_num_daemons; i++) {
1329 io_array[i] = (struct cconn){
1331 .request_type = request_type,
1332 .state = CSTATE_UNCONNECTED,
1336 /** Calculate the time at which we should give up */
1337 gettimeofday(&end_tv, NULL);
1338 end_tv.tv_sec += CEPH_TIMEOUT_INTERVAL;
1343 struct cconn *polled_io_array[g_num_daemons];
1344 struct pollfd fds[g_num_daemons];
1345 memset(fds, 0, sizeof(fds));
/* Build the pollfd set; connections that error out are marked serviced
 * so they are not retried this cycle. */
1347 for (size_t i = 0; i < g_num_daemons; ++i) {
1348 struct cconn *io = io_array + i;
1349 ret = cconn_prepare(io, fds + nfds);
1351 WARNING("ceph plugin: cconn_prepare(name=%s,i=%zu,st=%d)=%d",
1352 io->d->name, i, io->state, ret);
1354 io->request_type = ASOK_REQ_NONE;
1355 some_unreachable = 1;
1356 } else if (ret == 1) {
1357 polled_io_array[nfds++] = io_array + i;
/* Remaining time until the deadline becomes the poll timeout. */
1365 gettimeofday(&tv, NULL);
1366 diff = milli_diff(&end_tv, &tv);
1370 WARNING("ceph plugin: cconn_main_loop: timed out.");
1373 RETRY_ON_EINTR(ret, poll(fds, nfds, diff));
1375 ERROR("ceph plugin: poll(2) error: %d", ret);
1378 for (int i = 0; i < nfds; ++i) {
1379 struct cconn *io = polled_io_array[i];
1380 int revents = fds[i].revents;
1384 } else if (cconn_validate_revents(io, revents)) {
1385 WARNING("ceph plugin: cconn(name=%s,i=%d,st=%d): "
1386 "revents validation error: "
1388 io->d->name, i, io->state, revents);
1390 io->request_type = ASOK_REQ_NONE;
1391 some_unreachable = 1;
1393 ret = cconn_handle_event(io);
1395 WARNING("ceph plugin: cconn_handle_event(name=%s,"
1396 "i=%d,st=%d): error %d",
1397 io->d->name, i, io->state, ret);
1399 io->request_type = ASOK_REQ_NONE;
1400 some_unreachable = 1;
1406 for (size_t i = 0; i < g_num_daemons; ++i) {
1407 cconn_close(io_array + i);
1409 if (some_unreachable) {
1410 DEBUG("ceph plugin: cconn_main_loop: some Ceph daemons were unreachable.");
1412 DEBUG("ceph plugin: cconn_main_loop: reached all Ceph daemons :)");
1417 static int ceph_read(void) { return cconn_main_loop(ASOK_REQ_DATA); }
1419 /******* lifecycle *******/
/* Plugin init callback: warns when the CAP_DAC_OVERRIDE capability needed to
 * open daemon admin sockets is missing, logs the configured daemons, and
 * performs the initial version handshake with each one. */
1420 static int ceph_init(void) {
1421 #if defined(HAVE_SYS_CAPABILITY_H) && defined(CAP_DAC_OVERRIDE)
1422 if (check_capability(CAP_DAC_OVERRIDE) != 0) {
1424 WARNING("ceph plugin: Running collectd as root, but the "
1425 "CAP_DAC_OVERRIDE capability is missing. The plugin's read "
1426 "function will probably fail. Is your init system dropping "
1430 "ceph plugin: collectd doesn't have the CAP_DAC_OVERRIDE "
1431 "capability. If you don't want to run collectd as root, try running "
1432 "\"setcap cap_dac_override=ep\" on the collectd binary.");
1436 ceph_daemons_print();
1438 if (g_num_daemons < 1) {
1439 ERROR("ceph plugin: No daemons configured. See the \"Daemon\" config "
1444 return cconn_main_loop(ASOK_REQ_VERSION);
/* Plugin shutdown callback: releases every daemon's owned allocations. */
1447 static int ceph_shutdown(void) {
1448 for (size_t i = 0; i < g_num_daemons; ++i) {
1449 ceph_daemon_free(g_daemons[i]);
1454 DEBUG("ceph plugin: finished ceph_shutdown");
/* collectd entry point: registers config, init, read and shutdown hooks. */
1458 void module_register(void) {
1459 plugin_register_complex_config("ceph", ceph_config);
1460 plugin_register_init("ceph", ceph_init);
1461 plugin_register_read("ceph", ceph_read);
1462 plugin_register_shutdown("ceph", ceph_shutdown);