2 * collectd - src/ceph.c
3 * Copyright (C) 2011 New Dream Network
4 * Copyright (C) 2015 Florian octo Forster
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; only version 2 of the License is applicable.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Colin McCabe <cmccabe at alumni.cmu.edu>
21 * Dennis Zou <yunzou at cisco.com>
22 * Dan Ryder <daryder at cisco.com>
23 * Florian octo Forster <octo at collectd.org>
26 #define _DEFAULT_SOURCE
33 #include <arpa/inet.h>
36 #include <yajl/yajl_parse.h>
37 #if HAVE_YAJL_YAJL_VERSION_H
38 #include <yajl/yajl_version.h>
48 #include <sys/socket.h>
50 #include <sys/types.h>
/** Sentinel returned by node handlers to request a retry with the full key. */
#define RETRY_AVGCOUNT -1

#if defined(YAJL_MAJOR) && (YAJL_MAJOR > 1)
# define HAVE_YAJL_V2 1
#endif

/** Restart a syscall that returned -1 with errno == EINTR. */
#define RETRY_ON_EINTR(ret, expr) \
    while(1) { \
        ret = expr; \
        if(ret >= 0 || errno != EINTR) { \
            break; \
        } \
    }

/** Timeout interval in seconds */
#define CEPH_TIMEOUT_INTERVAL 1

/** Maximum path length for a UNIX domain socket on this system */
#define UNIX_DOMAIN_SOCK_PATH_MAX (sizeof(((struct sockaddr_un*)0)->sun_path))

/** Yajl callback returns */
#define CEPH_CB_CONTINUE 1
#define CEPH_CB_ABORT 0
#ifdef HAVE_YAJL_V2
typedef size_t yajl_len_t;
#else
typedef unsigned int yajl_len_t;
#endif

/** Number of types for ceph defined in types.db */
#define CEPH_DSET_TYPES_NUM 3
/** ceph types enum */
enum ceph_dset_type_d
{
    DSET_LATENCY = 0,
    DSET_BYTES = 1,
    DSET_RATE = 2,
    DSET_TYPE_UNFOUND = 1000
};

/** Valid types for ceph defined in types.db */
const char * ceph_dset_types [CEPH_DSET_TYPES_NUM] =
        {"ceph_latency", "ceph_bytes", "ceph_rate"};
103 /******* ceph_daemon *******/
106 /** Version of the admin_socket interface */
109 char name[DATA_MAX_NAME_LEN];
111 /** Path to the socket that we use to talk to the ceph daemon */
112 char asok_path[UNIX_DOMAIN_SOCK_PATH_MAX];
114 /** Number of counters */
116 /** Track ds types */
118 /** Track ds names to match with types */
122 * Keep track of last data for latency values so we can calculate rate
125 struct last_data **last_poll_data;
126 /** index of last poll data */
130 /******* JSON parsing *******/
131 typedef int (*node_handler_t)(void *, const char*, const char*);
133 /** Track state and handler while parsing JSON */
136 node_handler_t handler;
139 char key[DATA_MAX_NAME_LEN];
141 } state[YAJL_MAX_DEPTH];
144 typedef struct yajl_struct yajl_struct;
/** Perfcounter type bit flags as reported by the Ceph schema. */
enum perfcounter_type_d
{
    PERFCOUNTER_LATENCY = 0x4, PERFCOUNTER_DERIVE = 0x8,
};
151 /** Give user option to use default (long run = since daemon started) avg */
152 static int long_run_latency_avg = 0;
155 * Give user option to use default type for special cases -
156 * filestore.journal_wr_bytes is currently only metric here. Ceph reports the
157 * type as a sum/count pair and will calculate it the same as a latency value.
158 * All other "bytes" metrics (excluding the used/capacity bytes for the OSD)
159 * use the DERIVE type. Unless user specifies to use given type, convert this
160 * metric to use DERIVE.
162 static int convert_special_metrics = 1;
164 /** Array of daemons to monitor */
165 static struct ceph_daemon **g_daemons = NULL;
167 /** Number of elements in g_daemons */
168 static int g_num_daemons = 0;
171 * A set of data that we build up in memory while parsing the JSON.
175 /** ceph daemon we are processing data for*/
176 struct ceph_daemon *d;
177 /** track avgcount across counters for avgcount/sum latency pairs */
179 /** current index of counters - used to get type of counter */
181 /** do we already have an avgcount for latency pair */
184 * similar to index, but current index of latency type counters -
185 * used to get last poll data of counter
189 * values list - maintain across counters since
190 * host/plugin/plugin instance are always the same
196 * A set of count/sum pairs to keep track of latency types and get difference
197 * between this poll data and last poll data.
201 char ds_name[DATA_MAX_NAME_LEN];
206 /******* network I/O *******/
209 CSTATE_UNCONNECTED = 0,
210 CSTATE_WRITE_REQUEST,
218 ASOK_REQ_VERSION = 0,
221 ASOK_REQ_NONE = 1000,
226 /** The Ceph daemon that we're talking to */
227 struct ceph_daemon *d;
230 uint32_t request_type;
232 /** The connection state */
235 /** The socket we use to talk to this daemon */
238 /** The amount of data remaining to read / write. */
241 /** Length of the JSON to read */
244 /** Buffer containing JSON data */
247 /** Keep data important to yajl processing */
248 struct yajl_struct yajl;
251 static int ceph_cb_null(void *ctx)
253 return CEPH_CB_CONTINUE;
256 static int ceph_cb_boolean(void *ctx, int bool_val)
258 return CEPH_CB_CONTINUE;
262 ceph_cb_number(void *ctx, const char *number_val, yajl_len_t number_len)
264 yajl_struct *yajl = (yajl_struct*)ctx;
265 char buffer[number_len+1];
266 int i, latency_type = 0, result;
269 memcpy(buffer, number_val, number_len);
270 buffer[sizeof(buffer) - 1] = 0;
272 ssnprintf(key, yajl->state[0].key_len, "%s", yajl->state[0].key);
273 for(i = 1; i < yajl->depth; i++)
275 if((i == yajl->depth-1) && ((strcmp(yajl->state[i].key,"avgcount") == 0)
276 || (strcmp(yajl->state[i].key,"sum") == 0)))
278 if(convert_special_metrics)
281 * Special case for filestore:JournalWrBytes. For some reason,
282 * Ceph schema encodes this as a count/sum pair while all
283 * other "Bytes" data (excluding used/capacity bytes for OSD
284 * space) uses a single "Derive" type. To spare further
285 * confusion, keep this KPI as the same type of other "Bytes".
286 * Instead of keeping an "average" or "rate", use the "sum" in
287 * the pair and assign that to the derive value.
289 if((strcmp(yajl->state[i-1].key, "journal_wr_bytes") == 0) &&
290 (strcmp(yajl->state[i-2].key,"filestore") == 0) &&
291 (strcmp(yajl->state[i].key,"avgcount") == 0))
293 DEBUG("ceph plugin: Skipping avgcount for filestore.JournalWrBytes");
294 yajl->depth = (yajl->depth - 1);
295 return CEPH_CB_CONTINUE;
298 //probably a avgcount/sum pair. if not - we'll try full key later
302 strncat(key, ".", 1);
303 strncat(key, yajl->state[i].key, yajl->state[i].key_len+1);
306 result = yajl->handler(yajl->handler_arg, buffer, key);
308 if((result == RETRY_AVGCOUNT) && latency_type)
310 strncat(key, ".", 1);
311 strncat(key, yajl->state[yajl->depth-1].key,
312 yajl->state[yajl->depth-1].key_len+1);
313 result = yajl->handler(yajl->handler_arg, buffer, key);
316 if(result == -ENOMEM)
318 ERROR("ceph plugin: memory allocation failed");
319 return CEPH_CB_ABORT;
322 yajl->depth = (yajl->depth - 1);
323 return CEPH_CB_CONTINUE;
326 static int ceph_cb_string(void *ctx, const unsigned char *string_val,
327 yajl_len_t string_len)
329 return CEPH_CB_CONTINUE;
332 static int ceph_cb_start_map(void *ctx)
334 return CEPH_CB_CONTINUE;
338 ceph_cb_map_key(void *ctx, const unsigned char *key, yajl_len_t string_len)
340 yajl_struct *yajl = (yajl_struct*)ctx;
342 if((yajl->depth+1) >= YAJL_MAX_DEPTH)
344 ERROR("ceph plugin: depth exceeds max, aborting.");
345 return CEPH_CB_ABORT;
348 char buffer[string_len+1];
350 memcpy(buffer, key, string_len);
351 buffer[sizeof(buffer) - 1] = 0;
353 snprintf(yajl->state[yajl->depth].key, sizeof(buffer), "%s", buffer);
354 yajl->state[yajl->depth].key_len = sizeof(buffer);
355 yajl->depth = (yajl->depth + 1);
357 return CEPH_CB_CONTINUE;
360 static int ceph_cb_end_map(void *ctx)
362 yajl_struct *yajl = (yajl_struct*)ctx;
364 yajl->depth = (yajl->depth - 1);
365 return CEPH_CB_CONTINUE;
368 static int ceph_cb_start_array(void *ctx)
370 return CEPH_CB_CONTINUE;
373 static int ceph_cb_end_array(void *ctx)
375 return CEPH_CB_CONTINUE;
378 static yajl_callbacks callbacks = {
392 static void ceph_daemon_print(const struct ceph_daemon *d)
394 DEBUG("ceph plugin: name=%s, asok_path=%s", d->name, d->asok_path);
397 static void ceph_daemons_print(void)
400 for(i = 0; i < g_num_daemons; ++i)
402 ceph_daemon_print(g_daemons[i]);
406 static void ceph_daemon_free(struct ceph_daemon *d)
409 for(; i < d->last_idx; i++)
411 sfree(d->last_poll_data[i]);
413 sfree(d->last_poll_data);
414 d->last_poll_data = NULL;
416 for(i = 0; i < d->ds_num; i++)
418 sfree(d->ds_names[i]);
/* compact_ds_name removes the special characters ":", "_", "-" and "+" from
 * the input string. Characters following these special characters are
 * capitalized. Trailing "+" and "-" characters are replaced with the strings
 * "Plus" and "Minus". Returns 0 on success, EINVAL on bad arguments.
 *
 * FIXES over the original: no strdup() (the original dereferenced an
 * unchecked strdup and could leak it), and empty input is rejected (the
 * original read src_copy[-1] for ""). */
static int compact_ds_name (char *buffer, size_t buffer_size, char const *src)
{
    size_t src_len;
    size_t out = 0;
    size_t i;
    _Bool append_plus = 0;
    _Bool append_minus = 0;
    _Bool capitalize = 1;

    if ((buffer == NULL) || (buffer_size <= strlen ("Minus")) || (src == NULL)
            || (src[0] == 0))
        return EINVAL;

    src_len = strlen (src);

    /* Remember (and drop) a trailing "+" or "-"; it is re-appended as a
     * word below. */
    if (src[src_len - 1] == '+')
    {
        append_plus = 1;
        src_len--;
    }
    else if (src[src_len - 1] == '-')
    {
        append_minus = 1;
        src_len--;
    }

    /* Copy src, skipping separator characters and capitalizing the first
     * character and any character that follows a separator. */
    for (i = 0; (i < src_len) && (out + 1 < buffer_size); i++)
    {
        char c = src[i];

        if ((c == ':') || (c == '_') || (c == '-') || (c == '+'))
        {
            capitalize = 1;
            continue;
        }
        buffer[out++] = capitalize ? (char) toupper ((unsigned char) c) : c;
        capitalize = 0;
    }
    buffer[out] = 0;

    /* Append "Plus" or "Minus" if "+" or "-" has been stripped above,
     * overwriting the tail of the buffer when there is not enough room. */
    if (append_plus || append_minus)
    {
        char const *append = append_minus ? "Minus" : "Plus";
        size_t offset = buffer_size - (strlen (append) + 1);

        if (offset > out)
            offset = out;
        /* snprintf always NUL-terminates */
        snprintf (buffer + offset, buffer_size - offset, "%s", append);
    }

    return 0;
}
/** Returns true iff "str" ends with "suffix". */
static _Bool has_suffix (char const *str, char const *suffix)
{
    size_t n = strlen (str);
    size_t m = strlen (suffix);

    return (m <= n) && (strcmp (str + (n - m), suffix) == 0);
}
/* count_parts returns the number of elements a "foo.bar.baz" style key has. */
static size_t count_parts (char const *key)
{
    size_t parts_num = 0;

    for (char const *ptr = key; ptr != NULL; ptr = strchr (ptr + 1, '.'))
        parts_num++;

    return parts_num;
}
/**
 * Parse key to remove "type" if this is for schema and initiate compaction
 * of the remaining dotted name into "buffer".
 */
static int parse_keys (char *buffer, size_t buffer_size, const char *key_str)
{
    char tmp[2 * buffer_size];

    if (buffer == NULL || buffer_size == 0 || key_str == NULL || strlen (key_str) == 0)
        return EINVAL;

    if ((count_parts (key_str) > 2) && has_suffix (key_str, ".type"))
    {
        /* strip ".type" suffix iff the key has more than two parts. */
        size_t sz = strlen (key_str) - strlen (".type") + 1;

        if (sz > sizeof (tmp))
            sz = sizeof (tmp);
        sstrncpy (tmp, key_str, sz);
    }
    else
    {
        sstrncpy (tmp, key_str, sizeof (tmp));
    }

    return compact_ds_name (buffer, buffer_size, tmp);
}
562 * while parsing ceph admin socket schema, save counter name and type for later
565 static int ceph_daemon_add_ds_entry(struct ceph_daemon *d, const char *name,
569 char ds_name[DATA_MAX_NAME_LEN];
570 memset(ds_name, 0, sizeof(ds_name));
572 if(convert_special_metrics)
575 * Special case for filestore:JournalWrBytes. For some reason, Ceph
576 * schema encodes this as a count/sum pair while all other "Bytes" data
577 * (excluding used/capacity bytes for OSD space) uses a single "Derive"
578 * type. To spare further confusion, keep this KPI as the same type of
579 * other "Bytes". Instead of keeping an "average" or "rate", use the
580 * "sum" in the pair and assign that to the derive value.
582 if((strcmp(name,"filestore.journal_wr_bytes.type") == 0))
588 d->ds_names = realloc(d->ds_names, sizeof(char *) * (d->ds_num + 1));
594 d->ds_types = realloc(d->ds_types, sizeof(uint32_t) * (d->ds_num + 1));
600 d->ds_names[d->ds_num] = malloc(sizeof(char) * DATA_MAX_NAME_LEN);
601 if(!d->ds_names[d->ds_num])
606 type = (pc_type & PERFCOUNTER_DERIVE) ? DSET_RATE :
607 ((pc_type & PERFCOUNTER_LATENCY) ? DSET_LATENCY : DSET_BYTES);
608 d->ds_types[d->ds_num] = type;
610 if (parse_keys(ds_name, sizeof (ds_name), name))
615 sstrncpy(d->ds_names[d->ds_num], ds_name, DATA_MAX_NAME_LEN -1);
616 d->ds_num = (d->ds_num + 1);
621 /******* ceph_config *******/
622 static int cc_handle_str(struct oconfig_item_s *item, char *dest, int dest_len)
625 if(item->values_num != 1)
629 if(item->values[0].type != OCONFIG_TYPE_STRING)
633 val = item->values[0].value.string;
634 if(snprintf(dest, dest_len, "%s", val) > (dest_len - 1))
636 ERROR("ceph plugin: configuration parameter '%s' is too long.\n",
638 return -ENAMETOOLONG;
643 static int cc_handle_bool(struct oconfig_item_s *item, int *dest)
645 if(item->values_num != 1)
650 if(item->values[0].type != OCONFIG_TYPE_BOOLEAN)
655 *dest = (item->values[0].value.boolean) ? 1 : 0;
659 static int cc_add_daemon_config(oconfig_item_t *ci)
662 struct ceph_daemon *nd, cd;
663 struct ceph_daemon **tmp;
664 memset(&cd, 0, sizeof(struct ceph_daemon));
666 if((ci->values_num != 1) || (ci->values[0].type != OCONFIG_TYPE_STRING))
668 WARNING("ceph plugin: `Daemon' blocks need exactly one string "
673 ret = cc_handle_str(ci, cd.name, DATA_MAX_NAME_LEN);
679 for(i=0; i < ci->children_num; i++)
681 oconfig_item_t *child = ci->children + i;
683 if(strcasecmp("SocketPath", child->key) == 0)
685 ret = cc_handle_str(child, cd.asok_path, sizeof(cd.asok_path));
693 WARNING("ceph plugin: ignoring unknown option %s", child->key);
696 if(cd.name[0] == '\0')
698 ERROR("ceph plugin: you must configure a daemon name.\n");
701 else if(cd.asok_path[0] == '\0')
703 ERROR("ceph plugin(name=%s): you must configure an administrative "
704 "socket path.\n", cd.name);
707 else if(!((cd.asok_path[0] == '/') ||
708 (cd.asok_path[0] == '.' && cd.asok_path[1] == '/')))
710 ERROR("ceph plugin(name=%s): administrative socket paths must begin "
711 "with '/' or './' Can't parse: '%s'\n", cd.name, cd.asok_path);
715 tmp = realloc(g_daemons, (g_num_daemons+1) * sizeof(*g_daemons));
718 /* The positive return value here indicates that this is a
719 * runtime error, not a configuration error. */
724 nd = malloc(sizeof(*nd));
729 memcpy(nd, &cd, sizeof(*nd));
730 g_daemons[g_num_daemons++] = nd;
734 static int ceph_config(oconfig_item_t *ci)
738 for(i = 0; i < ci->children_num; ++i)
740 oconfig_item_t *child = ci->children + i;
741 if(strcasecmp("Daemon", child->key) == 0)
743 ret = cc_add_daemon_config(child);
746 ERROR("ceph plugin: Couldn't allocate memory");
751 //process other daemons and ignore this one
755 else if(strcasecmp("LongRunAvgLatency", child->key) == 0)
757 ret = cc_handle_bool(child, &long_run_latency_avg);
763 else if(strcasecmp("ConvertSpecialMetricTypes", child->key) == 0)
765 ret = cc_handle_bool(child, &convert_special_metrics);
773 WARNING("ceph plugin: ignoring unknown option %s", child->key);
780 * Parse JSON and get error message if present
783 traverse_json(const unsigned char *json, uint32_t json_len, yajl_handle hand)
785 yajl_status status = yajl_parse(hand, json, json_len);
790 case yajl_status_error:
791 msg = yajl_get_error(hand, /* verbose = */ 1,
792 /* jsonText = */ (unsigned char *) json,
793 (unsigned int) json_len);
794 ERROR ("ceph plugin: yajl_parse failed: %s", msg);
795 yajl_free_error(hand, msg);
797 case yajl_status_client_canceled:
/**
 * Add entry for each counter while parsing schema: "val" carries the
 * perfcounter type bit flags as a decimal string.
 */
static int
node_handler_define_schema(void *arg, const char *val, const char *key)
{
    struct ceph_daemon *d = (struct ceph_daemon *) arg;
    int pc_type;

    pc_type = atoi(val);
    return ceph_daemon_add_ds_entry(d, key, pc_type);
}
817 * Latency counter does not yet have an entry in last poll data - add it.
819 static int add_last(struct ceph_daemon *d, const char *ds_n, double cur_sum,
822 d->last_poll_data[d->last_idx] = malloc(1 * sizeof(struct last_data));
823 if(!d->last_poll_data[d->last_idx])
827 sstrncpy(d->last_poll_data[d->last_idx]->ds_name,ds_n,
828 sizeof(d->last_poll_data[d->last_idx]->ds_name));
829 d->last_poll_data[d->last_idx]->last_sum = cur_sum;
830 d->last_poll_data[d->last_idx]->last_count = cur_count;
831 d->last_idx = (d->last_idx + 1);
836 * Update latency counter or add new entry if it doesn't exist
838 static int update_last(struct ceph_daemon *d, const char *ds_n, int index,
839 double cur_sum, uint64_t cur_count)
841 if((d->last_idx > index) && (strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0))
843 d->last_poll_data[index]->last_sum = cur_sum;
844 d->last_poll_data[index]->last_count = cur_count;
848 if(!d->last_poll_data)
850 d->last_poll_data = malloc(1 * sizeof(struct last_data *));
851 if(!d->last_poll_data)
858 struct last_data **tmp_last = realloc(d->last_poll_data,
859 ((d->last_idx+1) * sizeof(struct last_data *)));
864 d->last_poll_data = tmp_last;
866 return add_last(d, ds_n, cur_sum, cur_count);
870 * If using index guess failed (shouldn't happen, but possible if counters
871 * get rearranged), resort to searching for counter name
873 static int backup_search_for_last_avg(struct ceph_daemon *d, const char *ds_n)
876 for(; i < d->last_idx; i++)
878 if(strcmp(d->last_poll_data[i]->ds_name, ds_n) == 0)
887 * Calculate average b/t current data and last poll data
888 * if last poll data exists
890 static double get_last_avg(struct ceph_daemon *d, const char *ds_n, int index,
891 double cur_sum, uint64_t cur_count)
893 double result = -1.1, sum_delt = 0.0;
894 uint64_t count_delt = 0;
896 if(d->last_idx > index)
898 if(strcmp(d->last_poll_data[index]->ds_name, ds_n) == 0)
902 //test previous index
903 else if((index > 0) && (strcmp(d->last_poll_data[index-1]->ds_name, ds_n) == 0))
905 tmp_index = (index - 1);
909 tmp_index = backup_search_for_last_avg(d, ds_n);
912 if((tmp_index > -1) && (cur_count > d->last_poll_data[tmp_index]->last_count))
914 sum_delt = (cur_sum - d->last_poll_data[tmp_index]->last_sum);
915 count_delt = (cur_count - d->last_poll_data[tmp_index]->last_count);
916 result = (sum_delt / count_delt);
924 if(update_last(d, ds_n, tmp_index, cur_sum, cur_count) == -ENOMEM)
932 * If using index guess failed, resort to searching for counter name
934 static uint32_t backup_search_for_type(struct ceph_daemon *d, char *ds_name)
937 for(; idx < d->ds_num; idx++)
939 if(strcmp(d->ds_names[idx], ds_name) == 0)
941 return d->ds_types[idx];
944 return DSET_TYPE_UNFOUND;
948 * Process counter data and dispatch values
950 static int node_handler_fetch_data(void *arg, const char *val, const char *key)
955 struct values_tmp *vtmp = (struct values_tmp*) arg;
956 uint32_t type = DSET_TYPE_UNFOUND;
957 int index = vtmp->index;
959 char ds_name[DATA_MAX_NAME_LEN];
960 memset(ds_name, 0, sizeof(ds_name));
962 if (parse_keys (ds_name, sizeof (ds_name), key))
967 if(index >= vtmp->d->ds_num)
969 //don't overflow bounds of array
970 index = (vtmp->d->ds_num - 1);
974 * counters should remain in same order we parsed schema... we maintain the
975 * index variable to keep track of current point in list of counters. first
976 * use index to guess point in array for retrieving type. if that doesn't
977 * work, use the old way to get the counter type
979 if(strcmp(ds_name, vtmp->d->ds_names[index]) == 0)
982 type = vtmp->d->ds_types[index];
984 else if((index > 0) && (strcmp(ds_name, vtmp->d->ds_names[index-1]) == 0))
987 type = vtmp->d->ds_types[index-1];
990 if(type == DSET_TYPE_UNFOUND)
992 //couldn't find right type by guessing, check the old way
993 type = backup_search_for_type(vtmp->d, ds_name);
999 if(vtmp->avgcount_exists == -1)
1001 sscanf(val, "%" PRIu64, &vtmp->avgcount);
1002 vtmp->avgcount_exists = 0;
1003 //return after saving avgcount - don't dispatch value
1004 //until latency calculation
1010 sscanf(val, "%lf", &sum);
1012 if(vtmp->avgcount == 0)
1017 /** User wants latency values as long run avg */
1018 if(long_run_latency_avg)
1020 result = (sum / vtmp->avgcount);
1024 result = get_last_avg(vtmp->d, ds_name, vtmp->latency_index, sum, vtmp->avgcount);
1025 if(result == -ENOMEM)
1032 vtmp->avgcount_exists = -1;
1033 vtmp->latency_index = (vtmp->latency_index + 1);
1037 sscanf(val, "%lf", &tmp_d);
1041 sscanf(val, "%" PRIu64, &tmp_u);
1044 case DSET_TYPE_UNFOUND:
1046 ERROR("ceph plugin: ds %s was not properly initialized.", ds_name);
1050 sstrncpy(vtmp->vlist.type, ceph_dset_types[type], sizeof(vtmp->vlist.type));
1051 sstrncpy(vtmp->vlist.type_instance, ds_name, sizeof(vtmp->vlist.type_instance));
1052 vtmp->vlist.values = &uv;
1053 vtmp->vlist.values_len = 1;
1055 vtmp->index = (vtmp->index + 1);
1056 plugin_dispatch_values(&vtmp->vlist);
1061 static int cconn_connect(struct cconn *io)
1063 struct sockaddr_un address;
1065 if(io->state != CSTATE_UNCONNECTED)
1067 ERROR("ceph plugin: cconn_connect: io->state != CSTATE_UNCONNECTED");
1070 fd = socket(PF_UNIX, SOCK_STREAM, 0);
1074 ERROR("ceph plugin: cconn_connect: socket(PF_UNIX, SOCK_STREAM, 0) "
1075 "failed: error %d", err);
1078 memset(&address, 0, sizeof(struct sockaddr_un));
1079 address.sun_family = AF_UNIX;
1080 snprintf(address.sun_path, sizeof(address.sun_path), "%s",
1083 connect(fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un)));
1086 ERROR("ceph plugin: cconn_connect: connect(%d) failed: error %d",
1091 flags = fcntl(fd, F_GETFL, 0);
1092 if(fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0)
1095 ERROR("ceph plugin: cconn_connect: fcntl(%d, O_NONBLOCK) error %d",
1100 io->state = CSTATE_WRITE_REQUEST;
1107 static void cconn_close(struct cconn *io)
1109 io->state = CSTATE_UNCONNECTED;
1113 RETRY_ON_EINTR(res, close(io->asok));
1122 /* Process incoming JSON counter data */
1124 cconn_process_data(struct cconn *io, yajl_struct *yajl, yajl_handle hand)
1127 struct values_tmp *vtmp = calloc(1, sizeof(struct values_tmp) * 1);
1133 vtmp->vlist = (value_list_t)VALUE_LIST_INIT;
1134 sstrncpy(vtmp->vlist.host, hostname_g, sizeof(vtmp->vlist.host));
1135 sstrncpy(vtmp->vlist.plugin, "ceph", sizeof(vtmp->vlist.plugin));
1136 sstrncpy(vtmp->vlist.plugin_instance, io->d->name, sizeof(vtmp->vlist.plugin_instance));
1139 vtmp->avgcount_exists = -1;
1140 vtmp->latency_index = 0;
1142 yajl->handler_arg = vtmp;
1143 ret = traverse_json(io->json, io->json_len, hand);
1149 * Initiate JSON parsing and print error if one occurs
1151 static int cconn_process_json(struct cconn *io)
1153 if((io->request_type != ASOK_REQ_DATA) &&
1154 (io->request_type != ASOK_REQ_SCHEMA))
1163 hand = yajl_alloc(&callbacks,
1165 /* alloc funcs = */ NULL,
1167 /* alloc funcs = */ NULL, NULL,
1169 /* context = */ (void *)(&io->yajl));
1173 ERROR ("ceph plugin: yajl_alloc failed.");
1179 switch(io->request_type)
1182 io->yajl.handler = node_handler_fetch_data;
1183 result = cconn_process_data(io, &io->yajl, hand);
1185 case ASOK_REQ_SCHEMA:
1186 //init daemon specific variables
1188 io->d->last_idx = 0;
1189 io->d->last_poll_data = NULL;
1190 io->yajl.handler = node_handler_define_schema;
1191 io->yajl.handler_arg = io->d;
1192 result = traverse_json(io->json, io->json_len, hand);
1202 status = yajl_complete_parse(hand);
1204 status = yajl_parse_complete(hand);
1207 if (status != yajl_status_ok)
1209 unsigned char *errmsg = yajl_get_error (hand, /* verbose = */ 0,
1210 /* jsonText = */ NULL, /* jsonTextLen = */ 0);
1211 ERROR ("ceph plugin: yajl_parse_complete failed: %s",
1213 yajl_free_error (hand, errmsg);
1223 static int cconn_validate_revents(struct cconn *io, int revents)
1225 if(revents & POLLERR)
1227 ERROR("ceph plugin: cconn_validate_revents(name=%s): got POLLERR",
1233 case CSTATE_WRITE_REQUEST:
1234 return (revents & POLLOUT) ? 0 : -EINVAL;
1235 case CSTATE_READ_VERSION:
1236 case CSTATE_READ_AMT:
1237 case CSTATE_READ_JSON:
1238 return (revents & POLLIN) ? 0 : -EINVAL;
1240 ERROR("ceph plugin: cconn_validate_revents(name=%s) got to "
1241 "illegal state on line %d", io->d->name, __LINE__);
1246 /** Handle a network event for a connection */
1247 static int cconn_handle_event(struct cconn *io)
1252 case CSTATE_UNCONNECTED:
1253 ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
1254 "state on line %d", io->d->name, __LINE__);
1257 case CSTATE_WRITE_REQUEST:
1260 snprintf(cmd, sizeof(cmd), "%s%d%s", "{ \"prefix\": \"",
1261 io->request_type, "\" }\n");
1262 size_t cmd_len = strlen(cmd);
1264 write(io->asok, ((char*)&cmd) + io->amt, cmd_len - io->amt));
1265 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,amt=%d,ret=%d)",
1266 io->d->name, io->state, io->amt, ret);
1272 if(io->amt >= cmd_len)
1275 switch (io->request_type)
1277 case ASOK_REQ_VERSION:
1278 io->state = CSTATE_READ_VERSION;
1281 io->state = CSTATE_READ_AMT;
1287 case CSTATE_READ_VERSION:
1290 read(io->asok, ((char*)(&io->d->version)) + io->amt,
1291 sizeof(io->d->version) - io->amt));
1292 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1293 io->d->name, io->state, ret);
1299 if(io->amt >= sizeof(io->d->version))
1301 io->d->version = ntohl(io->d->version);
1302 if(io->d->version != 1)
1304 ERROR("ceph plugin: cconn_handle_event(name=%s) not "
1305 "expecting version %d!", io->d->name, io->d->version);
1308 DEBUG("ceph plugin: cconn_handle_event(name=%s): identified as "
1309 "version %d", io->d->name, io->d->version);
1312 io->request_type = ASOK_REQ_SCHEMA;
1316 case CSTATE_READ_AMT:
1319 read(io->asok, ((char*)(&io->json_len)) + io->amt,
1320 sizeof(io->json_len) - io->amt));
1321 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1322 io->d->name, io->state, ret);
1328 if(io->amt >= sizeof(io->json_len))
1330 io->json_len = ntohl(io->json_len);
1332 io->state = CSTATE_READ_JSON;
1333 io->json = calloc(1, io->json_len + 1);
1336 ERROR("ceph plugin: error callocing io->json");
1342 case CSTATE_READ_JSON:
1345 read(io->asok, io->json + io->amt, io->json_len - io->amt));
1346 DEBUG("ceph plugin: cconn_handle_event(name=%s,state=%d,ret=%d)",
1347 io->d->name, io->state, ret);
1353 if(io->amt >= io->json_len)
1355 ret = cconn_process_json(io);
1361 io->request_type = ASOK_REQ_NONE;
1366 ERROR("ceph plugin: cconn_handle_event(name=%s) got to illegal "
1367 "state on line %d", io->d->name, __LINE__);
1372 static int cconn_prepare(struct cconn *io, struct pollfd* fds)
1375 if(io->request_type == ASOK_REQ_NONE)
1377 /* The request has already been serviced. */
1380 else if((io->request_type == ASOK_REQ_DATA) && (io->d->ds_num == 0))
1382 /* If there are no counters to report on, don't bother
1389 case CSTATE_UNCONNECTED:
1390 ret = cconn_connect(io);
1400 fds->events = POLLOUT;
1402 case CSTATE_WRITE_REQUEST:
1404 fds->events = POLLOUT;
1406 case CSTATE_READ_VERSION:
1407 case CSTATE_READ_AMT:
1408 case CSTATE_READ_JSON:
1410 fds->events = POLLIN;
1413 ERROR("ceph plugin: cconn_prepare(name=%s) got to illegal state "
1414 "on line %d", io->d->name, __LINE__);
/** Returns the difference between two struct timevals in milliseconds.
 * On overflow, we return max/min int.
 *
 * FIX: the original computed (int)sec_diff * 1000 in int arithmetic, which is
 * signed-overflow UB for differences beyond ~24.8 days — the clamp below never
 * saw the true value.  All arithmetic is now done in int64_t. */
static int milli_diff(const struct timeval *t1, const struct timeval *t2)
{
    int64_t ret;
    int64_t sec_diff = (int64_t) t1->tv_sec - (int64_t) t2->tv_sec;
    int64_t usec_diff = (int64_t) t1->tv_usec - (int64_t) t2->tv_usec;

    ret = usec_diff / 1000;
    ret += (sec_diff * 1000);
    return (ret > INT_MAX) ? INT_MAX : ((ret < INT_MIN) ? INT_MIN : (int)ret);
}
1432 /** This handles the actual network I/O to talk to the Ceph daemons.
1434 static int cconn_main_loop(uint32_t request_type)
1436 int i, ret, some_unreachable = 0;
1437 struct timeval end_tv;
1438 struct cconn io_array[g_num_daemons];
1440 DEBUG("ceph plugin: entering cconn_main_loop(request_type = %d)", request_type);
1442 /* create cconn array */
1443 memset(io_array, 0, sizeof(io_array));
1444 for(i = 0; i < g_num_daemons; ++i)
1446 io_array[i].d = g_daemons[i];
1447 io_array[i].request_type = request_type;
1448 io_array[i].state = CSTATE_UNCONNECTED;
1451 /** Calculate the time at which we should give up */
1452 gettimeofday(&end_tv, NULL);
1453 end_tv.tv_sec += CEPH_TIMEOUT_INTERVAL;
1459 struct cconn *polled_io_array[g_num_daemons];
1460 struct pollfd fds[g_num_daemons];
1461 memset(fds, 0, sizeof(fds));
1463 for(i = 0; i < g_num_daemons; ++i)
1465 struct cconn *io = io_array + i;
1466 ret = cconn_prepare(io, fds + nfds);
1469 WARNING("ceph plugin: cconn_prepare(name=%s,i=%d,st=%d)=%d",
1470 io->d->name, i, io->state, ret);
1472 io->request_type = ASOK_REQ_NONE;
1473 some_unreachable = 1;
1477 polled_io_array[nfds++] = io_array + i;
1486 gettimeofday(&tv, NULL);
1487 diff = milli_diff(&end_tv, &tv);
1492 WARNING("ceph plugin: cconn_main_loop: timed out.");
1495 RETRY_ON_EINTR(ret, poll(fds, nfds, diff));
1498 ERROR("ceph plugin: poll(2) error: %d", ret);
1501 for(i = 0; i < nfds; ++i)
1503 struct cconn *io = polled_io_array[i];
1504 int revents = fds[i].revents;
1509 else if(cconn_validate_revents(io, revents))
1511 WARNING("ceph plugin: cconn(name=%s,i=%d,st=%d): "
1512 "revents validation error: "
1513 "revents=0x%08x", io->d->name, i, io->state, revents);
1515 io->request_type = ASOK_REQ_NONE;
1516 some_unreachable = 1;
1520 int ret = cconn_handle_event(io);
1523 WARNING("ceph plugin: cconn_handle_event(name=%s,"
1524 "i=%d,st=%d): error %d", io->d->name, i, io->state, ret);
1526 io->request_type = ASOK_REQ_NONE;
1527 some_unreachable = 1;
1532 done: for(i = 0; i < g_num_daemons; ++i)
1534 cconn_close(io_array + i);
1536 if(some_unreachable)
1538 DEBUG("ceph plugin: cconn_main_loop: some Ceph daemons were unreachable.");
1542 DEBUG("ceph plugin: cconn_main_loop: reached all Ceph daemons :)");
1547 static int ceph_read(void)
1549 return cconn_main_loop(ASOK_REQ_DATA);
1552 /******* lifecycle *******/
1553 static int ceph_init(void)
1556 ceph_daemons_print();
1558 ret = cconn_main_loop(ASOK_REQ_VERSION);
1560 return (ret) ? ret : 0;
1563 static int ceph_shutdown(void)
1566 for(i = 0; i < g_num_daemons; ++i)
1568 ceph_daemon_free(g_daemons[i]);
1573 DEBUG("ceph plugin: finished ceph_shutdown");
1577 void module_register(void)
1579 plugin_register_complex_config("ceph", ceph_config);
1580 plugin_register_init("ceph", ceph_init);
1581 plugin_register_read("ceph", ceph_read);
1582 plugin_register_shutdown("ceph", ceph_shutdown);