2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
21 #include <systemd/sd-daemon.h>
23 #include <semaphore.h>
24 #include <mpath_persist.h>
42 #include <blacklist.h>
43 #include <structs_vec.h>
45 #include <devmapper.h>
48 #include <discovery.h>
52 #include <switchgroup.h>
54 #include <configure.h>
56 #include <pgpolicies.h>
65 #include "cli_handlers.h"
70 #define FILE_NAME_SIZE 256
73 #define LOG_MSG(a, b) \
76 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
78 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
81 struct mpath_event_param
84 struct multipath *mpp;
87 unsigned int mpath_mx_alloc_len;
90 enum daemon_status running_state;
93 static sem_t exit_sem;
95 * global copy of vecs for use in sig handlers
97 struct vectors * gvecs;
/*
 * Decide whether @mpp should switch path groups.
 * Never switches for maps configured for manual failback. When
 * @refresh is set, re-reads every path's priority (DI_PRIO) before
 * re-selecting the best group; presumably returns true when the newly
 * computed bestpg differs from the current nextpg — TODO confirm
 * (NOTE(review): this listing is elided; braces/returns are missing).
 */
102 need_switch_pathgroup (struct multipath * mpp, int refresh)
104 struct pathgroup * pgp;
108 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
112 * Refresh path priority values
115 vector_foreach_slot (mpp->pg, pgp, i)
116 vector_foreach_slot (pgp->paths, pp, j)
117 pathinfo(pp, conf->hwtable, DI_PRIO);
119 mpp->bestpg = select_path_group(mpp);
121 if (mpp->bestpg != mpp->nextpg)
/*
 * Tell device-mapper to switch @mpp to its pre-computed best path
 * group (mpp->bestpg) and bump the switch-group statistic.
 */
128 switch_pathgroup (struct multipath * mpp)
130 mpp->stat_switchgroup++;
131 dm_switchgroup(mpp->alias, mpp->bestpg);
132 condlog(2, "%s: switch to path group #%i",
133 mpp->alias, mpp->bestpg);
/*
 * Reconcile the old map vector (vecs->mpvec) with the new set @nmpv.
 * Maps whose wwid is absent from the new set are flushed from
 * device-mapper; a flush failure (e.g. the device is still open)
 * apparently moves the map into the new set instead of dropping it.
 * Surviving maps get their dm devices reassigned when reassign_maps
 * is configured. NOTE(review): listing is elided — error paths and
 * loop bodies are partially missing; do not infer full control flow.
 */
137 coalesce_maps(struct vectors *vecs, vector nmpv)
139 struct multipath * ompp;
140 vector ompv = vecs->mpvec;
143 vector_foreach_slot (ompv, ompp, i) {
144 condlog(3, "%s: coalesce map", ompp->alias);
145 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
147 * remove all current maps not allowed by the
148 * current configuration
150 if (dm_flush_map(ompp->alias)) {
151 condlog(0, "%s: unable to flush devmap",
154 * may be just because the device is open
156 if (setup_multipath(vecs, ompp) != 0) {
160 if (!vector_alloc_slot(nmpv))
163 vector_set_slot(nmpv, ompp);
165 vector_del_slot(ompv, i);
170 condlog(2, "%s devmap removed", ompp->alias);
172 } else if (conf->reassign_maps) {
173 condlog(3, "%s: Reassign existing device-mapper"
174 " devices", ompp->alias);
175 dm_reassign(ompp->alias);
/*
 * Reconcile kernel device-mapper path states with the checker states
 * held in @mpp: reinstate paths the kernel marks failed/undef while
 * the checker sees up/ghost, and fail paths the kernel marks
 * active/undef while the checker sees down/shaky. Paths still in
 * UNCHECKED/WILD state are skipped.
 */
182 sync_map_state(struct multipath *mpp)
184 struct pathgroup *pgp;
191 vector_foreach_slot (mpp->pg, pgp, i){
192 vector_foreach_slot (pgp->paths, pp, j){
193 if (pp->state == PATH_UNCHECKED ||
194 pp->state == PATH_WILD)
196 if ((pp->dmstate == PSTATE_FAILED ||
197 pp->dmstate == PSTATE_UNDEF) &&
198 (pp->state == PATH_UP || pp->state == PATH_GHOST))
199 dm_reinstate_path(mpp->alias, pp->dev_t);
200 else if ((pp->dmstate == PSTATE_ACTIVE ||
201 pp->dmstate == PSTATE_UNDEF) &&
202 (pp->state == PATH_DOWN ||
203 pp->state == PATH_SHAKY))
204 dm_fail_path(mpp->alias, pp->dev_t);
/*
 * Apply sync_map_state() to every map in @mpvec.
 */
210 sync_maps_state(vector mpvec)
213 struct multipath *mpp;
215 vector_foreach_slot (mpvec, mpp, i)
/*
 * Remove @mpp from device-mapper and from the daemon's bookkeeping.
 * A dm_flush_map() failure is logged but may be benign (the map could
 * already have been flushed externally, e.g. by dmsetup(8)). On the
 * success path the map's paths are orphaned and the map plus its dm
 * event waiter thread are torn down.
 */
220 flush_map(struct multipath * mpp, struct vectors * vecs)
223 * clear references to this map before flushing so we can ignore
224 * the spurious uevent we may generate with the dm_flush_map call below
226 if (dm_flush_map(mpp->alias)) {
228 * May not really be an error -- if the map was already flushed
229 * from the device mapper by dmsetup(8) for instance.
231 condlog(0, "%s: can't flush", mpp->alias);
236 condlog(2, "%s: map flushed", mpp->alias);
239 orphan_paths(vecs->pathvec, mpp);
240 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Handle a dm add/change uevent for a map device: resolve the map
 * alias from the uevent's DM_NAME, falling back to a device-mapper
 * lookup by major:minor, then delegate to ev_add_map().
 * NOTE(review): listing is elided — cleanup/return paths are missing.
 */
246 uev_add_map (struct uevent * uev, struct vectors * vecs)
249 int major = -1, minor = -1, rc;
251 condlog(3, "%s: add map (uevent)", uev->kernel);
252 alias = uevent_get_dm_name(uev);
254 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
255 major = uevent_get_major(uev);
256 minor = uevent_get_minor(uev);
257 alias = dm_mapname(major, minor);
259 condlog(2, "%s: mapname not found for %d:%d",
260 uev->kernel, major, minor);
264 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * Register a map named @alias (kernel device @dev) with the daemon.
 * Non-multipath dm devices are ignored. An already-known alias is not
 * an error (the daemon generates its own uevents when it creates
 * maps); in that case the dm devices may only be reassigned. An
 * unknown map present in dm is adopted via add_map_without_path();
 * otherwise the map is (re)created by resolving the reference wwid
 * and coalescing paths. NOTE(review): listing elided; the branch
 * structure between the success/blacklisted/failed logs is missing.
 */
270 ev_add_map (char * dev, char * alias, struct vectors * vecs)
273 struct multipath * mpp;
277 map_present = dm_map_present(alias);
279 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
280 condlog(4, "%s: not a multipath map", alias);
284 mpp = find_mp_by_alias(vecs->mpvec, alias);
288 * Not really an error -- we generate our own uevent
289 * if we create a multipath mapped device as a result
292 if (conf->reassign_maps) {
293 condlog(3, "%s: Reassign existing device-mapper devices",
299 condlog(2, "%s: adding map", alias);
302 * now we can register the map
304 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
306 condlog(2, "%s: devmap %s registered", alias, dev);
309 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
312 r = coalesce_paths(vecs, NULL, refwwid, 0);
317 condlog(2, "%s: devmap %s added", alias, dev);
319 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
321 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * Handle a dm remove uevent: look the map up by its minor number,
 * sanity-check that the alias from DM_NAME matches the registered
 * map, then orphan its paths and drop the map and its waiter thread.
 * A missing DM_NAME or unknown minor is ignored.
 */
328 uev_remove_map (struct uevent * uev, struct vectors * vecs)
332 struct multipath *mpp;
334 condlog(2, "%s: remove map (uevent)", uev->kernel);
335 alias = uevent_get_dm_name(uev);
337 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
340 minor = uevent_get_minor(uev);
341 mpp = find_mp_by_minor(vecs->mpvec, minor);
344 condlog(2, "%s: devmap not registered, can't remove",
348 if (strcmp(mpp->alias, alias)) {
349 condlog(2, "%s: minor number mismatch (map %d, event %d)",
350 mpp->alias, mpp->dmi->minor, minor);
354 orphan_paths(vecs->pathvec, mpp);
355 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * CLI/internal entry point to remove a map: find it by minor number,
 * verify the alias matches, and flush it via flush_map() (which also
 * removes it from the daemon's bookkeeping). Unlike uev_remove_map()
 * this actively flushes the device from device-mapper.
 */
362 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
364 struct multipath * mpp;
366 mpp = find_mp_by_minor(vecs->mpvec, minor);
369 condlog(2, "%s: devmap not registered, can't remove",
373 if (strcmp(mpp->alias, alias)) {
374 condlog(2, "%s: minor number mismatch (map %d, event %d)",
375 mpp->alias, mpp->dmi->minor, minor);
378 return flush_map(mpp, vecs);
/*
 * Handle a block-device add uevent for a path. Relative device names
 * (containing "..") are rejected. If the path is already known: a
 * path with an empty wwid is re-initialized against the new udev
 * device (and dropped from the pathvec when pathinfo() rejects it —
 * presumably the blacklist case, TODO confirm; the ret==2 branch is
 * elided). A genuinely new path gets its full pathinfo stored, its
 * check interval seeded from the config, and is handed to
 * ev_add_path().
 */
382 uev_add_path (struct uevent *uev, struct vectors * vecs)
387 condlog(2, "%s: add path (uevent)", uev->kernel);
388 if (strstr(uev->kernel, "..") != NULL) {
390 * Don't allow relative device names in the pathvec
392 condlog(0, "%s: path name is invalid", uev->kernel);
396 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
398 condlog(0, "%s: spurious uevent, path already in pathvec",
402 if (!strlen(pp->wwid)) {
403 udev_device_unref(pp->udev);
404 pp->udev = udev_device_ref(uev->udev);
405 ret = pathinfo(pp, conf->hwtable,
406 DI_ALL | DI_BLACKLIST);
408 i = find_slot(vecs->pathvec, (void *)pp);
410 vector_del_slot(vecs->pathvec, i);
413 } else if (ret == 1) {
414 condlog(0, "%s: failed to reinitialize path",
421 * get path vital state
423 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
424 uev->udev, DI_ALL, &pp);
428 condlog(0, "%s: failed to store path info",
432 pp->checkint = conf->checkint;
435 return ev_add_path(pp, vecs);
/*
 * Integrate an already-stored path @pp into its multipath map:
 * require a valid wwid, find or create the owning map (reload an
 * existing map after adopting sibling paths, or create a new map for
 * a first path), push the resulting table to device-mapper via
 * setup_map()/domap(), resync daemon state with the kernel, and start
 * a dm event waiter thread for newly created maps. Size-mismatched or
 * zero-size paths are dropped from the pathvec.
 * NOTE(review): listing elided — retry loop, labels and returns are
 * partially missing; do not infer the exact failure flow from here.
 */
444 ev_add_path (struct path * pp, struct vectors * vecs)
446 struct multipath * mpp;
447 char empty_buff[WWID_SIZE] = {0};
448 char params[PARAMS_SIZE] = {0};
450 int start_waiter = 0;
453 * need path UID to go any further
455 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
456 condlog(0, "%s: failed to get path uid", pp->dev);
457 goto fail; /* leave path added to pathvec */
459 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
462 if ((!pp->size) || (mpp->size != pp->size)) {
464 condlog(0, "%s: failed to add new path %s, "
466 mpp->alias, pp->dev);
468 condlog(0, "%s: failed to add new path %s, "
469 "device size mismatch",
470 mpp->alias, pp->dev);
471 int i = find_slot(vecs->pathvec, (void *)pp);
473 vector_del_slot(vecs->pathvec, i);
478 condlog(4,"%s: adopting all paths for path %s",
479 mpp->alias, pp->dev);
480 if (adopt_paths(vecs->pathvec, mpp, 1))
481 goto fail; /* leave path added to pathvec */
483 verify_paths(mpp, vecs, NULL);
484 mpp->flush_on_last_del = FLUSH_UNDEF;
485 mpp->action = ACT_RELOAD;
489 condlog(0, "%s: failed to create new map,"
490 " device size is 0 ", pp->dev);
491 int i = find_slot(vecs->pathvec, (void *)pp);
493 vector_del_slot(vecs->pathvec, i);
498 condlog(4,"%s: creating new map", pp->dev);
499 if ((mpp = add_map_with_path(vecs, pp, 1))) {
500 mpp->action = ACT_CREATE;
502 * We don't depend on ACT_CREATE, as domap will
503 * set it to ACT_NOTHING when complete.
508 goto fail; /* leave path added to pathvec */
511 /* persistent reservation check */
512 mpath_pr_event_handle(pp);
515 * push the map to the device-mapper
517 if (setup_map(mpp, params, PARAMS_SIZE)) {
518 condlog(0, "%s: failed to setup map for addition of new "
519 "path %s", mpp->alias, pp->dev);
523 * reload the map for the multipath mapped device
525 if (domap(mpp, params) <= 0) {
526 condlog(0, "%s: failed in domap for addition of new "
527 "path %s", mpp->alias, pp->dev);
529 * deal with asynchronous uevents :((
531 if (mpp->action == ACT_RELOAD && retries-- > 0) {
532 condlog(0, "%s: uev_add_path sleep", mpp->alias);
534 update_mpp_paths(mpp, vecs->pathvec);
537 else if (mpp->action == ACT_RELOAD)
538 condlog(0, "%s: giving up reload", mpp->alias);
545 * update our state from kernel regardless of create or reload
547 if (setup_multipath(vecs, mpp))
548 goto fail; /* if setup_multipath fails, it removes the map */
552 if ((mpp->action == ACT_CREATE ||
553 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
554 start_waiter_thread(mpp, vecs))
558 condlog(2, "%s [%s]: path added to devmap %s",
559 pp->dev, pp->dev_t, mpp->alias);
566 remove_map(mpp, vecs, 1);
568 orphan_path(pp, "failed to add path");
/*
 * Handle a block-device remove uevent for a path. A path that is no
 * longer in the pathvec is not an error (it may have been purged
 * earlier); otherwise delegate to ev_remove_path().
 */
573 uev_remove_path (struct uevent *uev, struct vectors * vecs)
577 condlog(2, "%s: remove path (uevent)", uev->kernel);
578 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
581 /* Not an error; path might have been purged earlier */
582 condlog(0, "%s: path already removed", uev->kernel);
586 return ev_remove_path(pp, vecs);
/*
 * Remove path @pp from its map and from the pathvec. If it was the
 * map's last path, queueing may be disabled first (flush_on_last_del)
 * and the whole map is flushed; otherwise the map's table is rebuilt
 * without the path and reloaded into device-mapper, then daemon state
 * is resynced from the kernel. Orphaned paths (pp->mpp == NULL) skip
 * all map handling and are simply dropped from the pathvec.
 * NOTE(review): listing elided — returns/labels are missing; the
 * remove_map_and_stop_waiter() call presumably belongs to a failure
 * path, TODO confirm against the full source.
 */
590 ev_remove_path (struct path *pp, struct vectors * vecs)
592 struct multipath * mpp;
594 char params[PARAMS_SIZE] = {0};
597 * avoid referring to the map of an orphaned path
599 if ((mpp = pp->mpp)) {
601 * transform the mp->pg vector of vectors of paths
602 * into a mp->params string to feed the device-mapper
604 if (update_mpp_paths(mpp, vecs->pathvec)) {
605 condlog(0, "%s: failed to update paths",
609 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
610 vector_del_slot(mpp->paths, i);
613 * remove the map IFF removing the last path
615 if (VECTOR_SIZE(mpp->paths) == 0) {
616 char alias[WWID_SIZE];
619 * flush_map will fail if the device is open
621 strncpy(alias, mpp->alias, WWID_SIZE);
622 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
623 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
625 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
626 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
627 dm_queue_if_no_path(mpp->alias, 0);
629 if (!flush_map(mpp, vecs)) {
630 condlog(2, "%s: removed map after"
631 " removing all paths",
637 * Not an error, continue
641 if (setup_map(mpp, params, PARAMS_SIZE)) {
642 condlog(0, "%s: failed to setup map for"
643 " removal of path %s", mpp->alias, pp->dev);
649 mpp->action = ACT_RELOAD;
650 if (domap(mpp, params) <= 0) {
651 condlog(0, "%s: failed in domap for "
652 "removal of path %s",
653 mpp->alias, pp->dev);
657 * update our state from kernel
659 if (setup_multipath(vecs, mpp)) {
664 condlog(2, "%s [%s]: path removed from map %s",
665 pp->dev, pp->dev_t, mpp->alias);
670 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
671 vector_del_slot(vecs->pathvec, i);
678 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Handle a change uevent for a path device: if the event carries a
 * disk read-only (DISK_RO) change, reload the owning map so the new
 * write-protect setting takes effect. Unknown paths are logged as
 * spurious events.
 */
683 uev_update_path (struct uevent *uev, struct vectors * vecs)
687 ro = uevent_get_disk_ro(uev);
692 condlog(2, "%s: update path write_protect to '%d' (uevent)",
694 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
696 condlog(0, "%s: spurious uevent, path not found",
701 retval = reload_map(vecs, pp->mpp, 0);
703 condlog(2, "%s: map %s reloaded (retval %d)",
704 uev->kernel, pp->mpp->alias, retval);
/*
 * Populate vecs->mpvec with the maps currently known to
 * device-mapper and run setup_multipath() on each to sync daemon
 * state. Presumably returns non-zero on failure — return lines are
 * elided in this listing, TODO confirm.
 */
713 map_discovery (struct vectors * vecs)
715 struct multipath * mpp;
718 if (dm_get_maps(vecs->mpvec))
721 vector_foreach_slot (vecs->mpvec, mpp, i)
722 if (setup_multipath(vecs, mpp))
/*
 * Callback for the CLI unix socket: execute one command string under
 * the vecs lock (taken via pthread cleanup so a cancelled thread
 * releases it). Synthesizes "fail\n" / "ok\n" replies when the
 * command handler did not set one; a negative rc leaves *reply alone
 * for the caller.
 */
729 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
731 struct vectors * vecs;
736 vecs = (struct vectors *)trigger_data;
738 pthread_cleanup_push(cleanup_lock, &vecs->lock);
740 pthread_testcancel();
742 r = parse_cmd(str, reply, len, vecs);
745 *reply = STRDUP("fail\n");
746 *len = strlen(*reply) + 1;
749 else if (!r && *len == 0) {
750 *reply = STRDUP("ok\n");
751 *len = strlen(*reply) + 1;
754 /* else if (r < 0) leave *reply alone */
756 lock_cleanup_pop(vecs->lock);
/*
 * Filter uevents by sysfs devpath: keep only whole block devices.
 * Events whose path lacks a "/block/" component, or that name a
 * partition under a block device (a second path component after the
 * device name), are discarded.
 */
761 uev_discard(char * devpath)
767 * keep only block devices, discard partitions
769 tmp = strstr(devpath, "/block/");
771 condlog(4, "no /block/ in '%s'", devpath);
774 if (sscanf(tmp, "/block/%10s", a) != 1 ||
775 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
776 condlog(4, "discard event on %s", devpath);
/*
 * Dispatch one uevent under the vecs lock. Kernel names starting with
 * "dm-" are map events: "change" registers/updates a map ("add" is
 * deliberately ignored, see comment below), "remove" drops it. All
 * other devices are path events (add/remove/change), subject to the
 * configured devnode blacklist filter.
 */
783 uev_trigger (struct uevent * uev, void * trigger_data)
786 struct vectors * vecs;
788 vecs = (struct vectors *)trigger_data;
790 if (uev_discard(uev->devpath))
793 pthread_cleanup_push(cleanup_lock, &vecs->lock);
795 pthread_testcancel();
799 * Add events are ignored here as the tables
800 * are not fully initialised then.
802 if (!strncmp(uev->kernel, "dm-", 3)) {
803 if (!strncmp(uev->action, "change", 6)) {
804 r = uev_add_map(uev, vecs);
807 if (!strncmp(uev->action, "remove", 6)) {
808 r = uev_remove_map(uev, vecs);
815 * path add/remove event
817 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
821 if (!strncmp(uev->action, "add", 3)) {
822 r = uev_add_path(uev, vecs);
825 if (!strncmp(uev->action, "remove", 6)) {
826 r = uev_remove_path(uev, vecs);
829 if (!strncmp(uev->action, "change", 6)) {
830 r = uev_update_path(uev, vecs);
835 lock_cleanup_pop(vecs->lock);
/*
 * Thread entry point: run the udev uevent listener until cancelled;
 * a startup failure is only logged.
 */
840 ueventloop (void * ap)
842 struct udev *udev = ap;
844 if (uevent_listen(udev))
845 condlog(0, "error starting uevent listener");
853 if (uevent_dispatch(&uev_trigger, ap))
854 condlog(0, "error starting uevent dispatcher");
/*
 * Thread entry point for the CLI listener: register every interactive
 * command handler (list/add/del/switch/suspend/resume/queueing/
 * persistent-reservation etc.), then serve commands on the unix
 * socket via uxsock_listen() with uxsock_trigger() as the executor.
 */
859 uxlsnrloop (void * ap)
864 set_handler_callback(LIST+PATHS, cli_list_paths);
865 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
866 set_handler_callback(LIST+MAPS, cli_list_maps);
867 set_handler_callback(LIST+STATUS, cli_list_status);
868 set_handler_callback(LIST+DAEMON, cli_list_daemon);
869 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
870 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
871 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
872 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
873 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
874 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
875 set_handler_callback(LIST+CONFIG, cli_list_config);
876 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
877 set_handler_callback(LIST+DEVICES, cli_list_devices);
878 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
879 set_handler_callback(ADD+PATH, cli_add_path);
880 set_handler_callback(DEL+PATH, cli_del_path);
881 set_handler_callback(ADD+MAP, cli_add_map);
882 set_handler_callback(DEL+MAP, cli_del_map);
883 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
884 set_handler_callback(RECONFIGURE, cli_reconfigure);
885 set_handler_callback(SUSPEND+MAP, cli_suspend);
886 set_handler_callback(RESUME+MAP, cli_resume);
887 set_handler_callback(RESIZE+MAP, cli_resize);
888 set_handler_callback(RELOAD+MAP, cli_reload);
889 set_handler_callback(RESET+MAP, cli_reassign);
890 set_handler_callback(REINSTATE+PATH, cli_reinstate);
891 set_handler_callback(FAIL+PATH, cli_fail);
892 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
893 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
894 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
895 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
896 set_handler_callback(QUIT, cli_quit);
897 set_handler_callback(SHUTDOWN, cli_shutdown);
898 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
899 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
900 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
901 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
902 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
905 uxsock_listen(&uxsock_trigger, ap);
919 switch (running_state) {
924 case DAEMON_CONFIGURE:
928 case DAEMON_SHUTDOWN:
/*
 * Proactively mark @pp failed in device-mapper after the checker
 * reported it down; when @del_active is set (guard elided in this
 * listing) also update the owning map's queueing mode for the lost
 * active path.
 */
935 fail_path (struct path * pp, int del_active)
940 condlog(2, "checker failed path %s in map %s",
941 pp->dev_t, pp->mpp->alias);
943 dm_fail_path(pp->mpp->alias, pp->dev_t);
945 update_queue_mode_del_path(pp->mpp);
949 * caller must have locked the path list before calling this function
/*
 * Reinstate @pp in device-mapper after the checker reported it usable
 * again; when @add_active is set (guard elided in this listing) also
 * update the owning map's queueing mode for the regained path.
 */
952 reinstate_path (struct path * pp, int add_active)
957 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
958 condlog(0, "%s: reinstate failed", pp->dev_t);
960 condlog(2, "%s: reinstated", pp->dev_t);
962 update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing @pp if device-mapper currently
 * has it disabled. Safe to bail out when pgindex is unset (path just
 * added via uev_add_path) because the next map reload enables all
 * groups anyway — see comment below.
 */
967 enable_group(struct path * pp)
969 struct pathgroup * pgp;
972 * if path is added through uev_add_path, pgindex can be unset.
973 * next update_strings() will set it, upon map reload event.
975 * we can safely return here, because upon map reload, all
976 * PG will be enabled.
978 if (!pp->mpp->pg || !pp->pgindex)
981 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
983 if (pgp->status == PGSTATE_DISABLED) {
984 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
985 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Periodic cleanup: drop any registered map whose dm device no longer
 * exists, stopping its waiter thread in the process.
 */
990 mpvec_garbage_collector (struct vectors * vecs)
992 struct multipath * mpp;
998 vector_foreach_slot (vecs->mpvec, mpp, i) {
999 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1000 condlog(2, "%s: remove dead map", mpp->alias);
1001 remove_map_and_stop_waiter(mpp, vecs, 1);
1007 /* This is called after a path has started working again. If the multipath
1008 * device for this path uses the followover failback type, and this is the
1009 * best pathgroup, and this is the first path in the pathgroup to come back
1010 * up, then switch to this pathgroup */
1012 followover_should_failback(struct path * pp)
1014 struct pathgroup * pgp;
1018 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1019 !pp->mpp->pg || !pp->pgindex ||
1020 pp->pgindex != pp->mpp->bestpg)
1023 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1024 vector_foreach_slot(pgp->paths, pp1, i) {
 /* any sibling path not down/shaky means pp was not the first
  * path to recover — presumably return 0 here (returns elided) */
1027 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * Per-tick countdown for deferred failback: decrement each map's
 * failback_tick and, when it reaches zero and a better path group is
 * available (priorities refreshed), switch to it.
 */
1034 defered_failback_tick (vector mpvec)
1036 struct multipath * mpp;
1039 vector_foreach_slot (mpvec, mpp, i) {
1041 * defered failback getting sooner
1043 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1044 mpp->failback_tick--;
1046 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1047 switch_pathgroup(mpp);
/*
 * Per-tick countdown for no-path retry: while a map is queueing with
 * no active path, count queueing time; when retry_tick expires, stop
 * queueing I/O for that map.
 */
1053 retry_count_tick(vector mpvec)
1055 struct multipath *mpp;
1058 vector_foreach_slot (mpvec, mpp, i) {
1059 if (mpp->retry_tick) {
1060 mpp->stat_total_queueing_time++;
1061 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1062 if(--mpp->retry_tick == 0) {
1063 dm_queue_if_no_path(mpp->alias, 0);
1064 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Refresh path priorities and report whether anything changed.
 * With @refresh_all set, every path of the owning map is re-read
 * (DI_PRIO) and any difference counts as a change; otherwise only
 * @pp's own priority is refreshed and compared.
 * NOTE(review): return statements are elided in this listing.
 */
1070 int update_prio(struct path *pp, int refresh_all)
1074 struct pathgroup * pgp;
1075 int i, j, changed = 0;
1078 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1079 vector_foreach_slot (pgp->paths, pp1, j) {
1080 oldpriority = pp1->priority;
1081 pathinfo(pp1, conf->hwtable, DI_PRIO);
1082 if (pp1->priority != oldpriority)
1088 oldpriority = pp->priority;
1089 pathinfo(pp, conf->hwtable, DI_PRIO);
1091 if (pp->priority == oldpriority)
/*
 * Regroup a map's paths: reload its table into device-mapper
 * (optionally refreshing path info first), resync daemon state from
 * the kernel, and reconcile per-path dm states.
 */
1096 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1098 if (reload_map(vecs, mpp, refresh))
1102 if (setup_multipath(vecs, mpp) != 0)
1104 sync_map_state(mpp);
/*
 * Run one checker cycle for @pp (called from checkerloop with the
 * vecs lock held). Honors the per-path tick so paths are only checked
 * every pp->checkint seconds. Determines the new state (offline
 * check, then the configured checker), handles removal, unusable and
 * pending states, syncs with kernel state, and on a state transition
 * fails or reinstates the path in device-mapper, re-enables disabled
 * groups, and finally refreshes priorities to decide whether the map
 * should switch path groups (immediately, deferred, or via
 * followover). A stable "up" path has its check interval doubled up
 * to max_checkint. Presumably returns 1 when the path was actually
 * checked and 0 otherwise (callers sum the result) — returns are
 * elided in this listing, TODO confirm.
 */
1110 check_path (struct vectors * vecs, struct path * pp)
1113 int new_path_up = 0;
1114 int chkr_new_path_up = 0;
1115 int oldchkrstate = pp->chkrstate;
1120 if (pp->tick && --pp->tick)
1121 return 0; /* don't check this path yet */
1124 * provision a next check soonest,
1125 * in case we exit abnormally from here
1127 pp->tick = conf->checkint;
1129 newstate = path_offline(pp);
1130 if (newstate == PATH_REMOVED) {
1131 condlog(2, "%s: remove path (checker)", pp->dev);
1132 ev_remove_path(pp, vecs);
1135 if (newstate == PATH_UP)
1136 newstate = get_state(pp, 1);
1138 checker_clear_message(&pp->checker);
1140 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1141 condlog(2, "%s: unusable path", pp->dev);
1142 pathinfo(pp, conf->hwtable, 0);
1146 * Async IO in flight. Keep the previous path state
1147 * and reschedule as soon as possible
1149 if (newstate == PATH_PENDING) {
1154 * Synchronize with kernel state
1156 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1157 condlog(1, "%s: Could not synchronize with kernel state",
1159 pp->dmstate = PSTATE_UNDEF;
1161 pp->chkrstate = newstate;
1162 if (newstate != pp->state) {
1163 int oldstate = pp->state;
1164 pp->state = newstate;
1166 if (strlen(checker_message(&pp->checker)))
1167 LOG_MSG(1, checker_message(&pp->checker));
1170 * upon state change, reset the checkint
1171 * to the shortest delay
1173 pp->checkint = conf->checkint;
1175 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1177 * proactively fail path in the DM
1179 if (oldstate == PATH_UP ||
1180 oldstate == PATH_GHOST)
1186 * cancel scheduled failback
1188 pp->mpp->failback_tick = 0;
1190 pp->mpp->stat_path_failures++;
1194 if(newstate == PATH_UP || newstate == PATH_GHOST){
1195 if ( pp->mpp && pp->mpp->prflag ){
1197 * Check Persistent Reservation.
1199 condlog(2, "%s: checking persistent reservation "
1200 "registration", pp->dev);
1201 mpath_pr_event_handle(pp);
1206 * reinstate this path
1208 if (oldstate != PATH_UP &&
1209 oldstate != PATH_GHOST)
1210 reinstate_path(pp, 1);
1212 reinstate_path(pp, 0);
1216 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1217 chkr_new_path_up = 1;
1220 * if at least one path is up in a group, and
1221 * the group is disabled, re-enable it
1223 if (newstate == PATH_UP)
1226 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1227 if (pp->dmstate == PSTATE_FAILED ||
1228 pp->dmstate == PSTATE_UNDEF) {
1229 /* Clear IO errors */
1230 reinstate_path(pp, 0);
1232 LOG_MSG(4, checker_message(&pp->checker));
1233 if (pp->checkint != conf->max_checkint) {
1235 * double the next check delay.
1236 * max at conf->max_checkint
1238 if (pp->checkint < (conf->max_checkint / 2))
1239 pp->checkint = 2 * pp->checkint;
1241 pp->checkint = conf->max_checkint;
1243 condlog(4, "%s: delay next check %is",
1244 pp->dev_t, pp->checkint);
1246 pp->tick = pp->checkint;
1249 else if (newstate == PATH_DOWN &&
1250 strlen(checker_message(&pp->checker))) {
1251 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1252 LOG_MSG(3, checker_message(&pp->checker));
1254 LOG_MSG(2, checker_message(&pp->checker));
1257 pp->state = newstate;
1260 * path prio refreshing
1262 condlog(4, "path prio refresh");
1264 if (update_prio(pp, new_path_up) &&
1265 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1266 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1267 update_path_groups(pp->mpp, vecs, !new_path_up);
1268 else if (need_switch_pathgroup(pp->mpp, 0)) {
1269 if (pp->mpp->pgfailback > 0 &&
1270 (new_path_up || pp->mpp->failback_tick <= 0))
1271 pp->mpp->failback_tick =
1272 pp->mpp->pgfailback + 1;
1273 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1274 (chkr_new_path_up && followover_should_failback(pp)))
1275 switch_pathgroup(pp->mpp);
/*
 * Main path-checker thread: locks itself into RAM, seeds every path's
 * check interval, then loops forever — under the vecs lock it pets
 * the systemd watchdog, runs check_path() on every path, drives the
 * deferred-failback and retry countdowns, and garbage-collects dead
 * maps; outside the lock it logs how long the sweep took.
 */
1281 checkerloop (void *ap)
1283 struct vectors *vecs;
1288 mlockall(MCL_CURRENT | MCL_FUTURE);
1289 vecs = (struct vectors *)ap;
1290 condlog(2, "path checkers start up");
1293 * init the path check interval
1295 vector_foreach_slot (vecs->pathvec, pp, i) {
1296 pp->checkint = conf->checkint;
1300 struct timeval diff_time, start_time, end_time;
1303 if (gettimeofday(&start_time, NULL) != 0)
1304 start_time.tv_sec = 0;
1305 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1307 pthread_testcancel();
1311 sd_notify(0, "WATCHDOG=1");
1313 if (vecs->pathvec) {
1314 vector_foreach_slot (vecs->pathvec, pp, i) {
1315 num_paths += check_path(vecs, pp);
1319 defered_failback_tick(vecs->mpvec);
1320 retry_count_tick(vecs->mpvec);
1325 condlog(4, "map garbage collection");
1326 mpvec_garbage_collector(vecs);
1330 lock_cleanup_pop(vecs->lock);
1331 if (start_time.tv_sec &&
1332 gettimeofday(&end_time, NULL) == 0 &&
1334 timersub(&end_time, &start_time, &diff_time);
1335 condlog(3, "checked %d path%s in %lu.%06lu secs",
1336 num_paths, num_paths > 1 ? "s" : "",
1337 diff_time.tv_sec, diff_time.tv_usec);
/*
 * Full (re)configuration pass: allocate the path and map vectors if
 * needed, discover paths from sysfs (dropping blacklisted ones) and
 * maps from device-mapper, coalesce paths into a fresh map set, flush
 * maps no longer allowed by the configuration, sync dm path states,
 * remember wwids, install the new map vector, and finally resync each
 * map and (when @start_waiters) start its dm event waiter thread.
 */
1345 configure (struct vectors * vecs, int start_waiters)
1347 struct multipath * mpp;
1352 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1355 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1358 if (!(mpvec = vector_alloc()))
1362 * probe for current path (from sysfs) and map (from dm) sets
1364 path_discovery(vecs->pathvec, conf, DI_ALL);
1366 vector_foreach_slot (vecs->pathvec, pp, i){
1367 if (filter_path(conf, pp) > 0){
1368 vector_del_slot(vecs->pathvec, i);
1373 pp->checkint = conf->checkint;
1375 if (map_discovery(vecs))
1379 * create new set of maps & push changed ones into dm
1381 if (coalesce_paths(vecs, mpvec, NULL, 1))
1385 * may need to remove some maps which are no longer relevant
1386 * e.g., due to blacklist changes in conf file
1388 if (coalesce_maps(vecs, mpvec))
1393 sync_maps_state(mpvec);
1394 vector_foreach_slot(mpvec, mpp, i){
1395 remember_wwid(mpp->wwid);
1400 * purge dm of old maps
1405 * save new set of maps formed by considering current path state
1407 vector_free(vecs->mpvec);
1408 vecs->mpvec = mpvec;
1411 * start dm event waiter threads for these new maps
1413 vector_foreach_slot(vecs->mpvec, mpp, i) {
1414 if (setup_multipath(vecs, mpp))
1417 if (start_waiter_thread(mpp, vecs))
/*
 * Reload the configuration file and rebuild all state: tear down the
 * existing maps (with their waiter threads) and paths, then re-read
 * DEFAULT_CONFIGFILE, preserving the previously set verbosity.
 * NOTE(review): the tail of this function (freeing the old config,
 * re-running configure()) is elided from this listing.
 */
1424 reconfigure (struct vectors * vecs)
1426 struct config * old = conf;
1430 * free old map and path vectors ... they use old conf state
1432 if (VECTOR_SIZE(vecs->mpvec))
1433 remove_maps_and_stop_waiters(vecs);
1435 if (VECTOR_SIZE(vecs->pathvec))
1436 free_pathvec(vecs->pathvec, FREE_PATHS);
1438 vecs->pathvec = NULL;
1441 /* Re-read any timezone changes */
1444 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1445 conf->verbosity = old->verbosity;
/*
 * Allocate and initialize the global vectors structure and its
 * recursive-style lock (mutex + depth counter).
 * NOTE(review): allocation-failure returns are elided here.
 */
1455 static struct vectors *
1458 struct vectors * vecs;
1460 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1466 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1468 if (!vecs->lock.mutex)
1471 pthread_mutex_init(vecs->lock.mutex, NULL);
1472 vecs->lock.depth = 0;
1478 condlog(0, "failed to init paths");
/*
 * sigaction()-based replacement for signal(): install @func as the
 * handler for @signo and return the previous handler (apparently
 * SIG_ERR on failure — the error branch is elided in this listing).
 */
1483 signal_set(int signo, void (*func) (int))
1486 struct sigaction sig;
1487 struct sigaction osig;
1489 sig.sa_handler = func;
1490 sigemptyset(&sig.sa_mask);
1493 r = sigaction(signo, &sig, &osig);
1498 return (osig.sa_handler);
/*
 * Process flags set by the async signal handlers, from a safe
 * context: a pending reconfigure request is serviced under the global
 * vecs lock (only while DAEMON_RUNNING), and a pending log-reset
 * request reopens the logger under logq_lock.
 */
1502 handle_signals(void)
1504 if (reconfig_sig && running_state == DAEMON_RUNNING) {
1505 condlog(2, "reconfigure (signal)");
1506 pthread_cleanup_push(cleanup_lock,
1509 pthread_testcancel();
1511 lock_cleanup_pop(gvecs->lock);
1513 if (log_reset_sig) {
1514 condlog(2, "reset log (signal)");
1515 pthread_mutex_lock(&logq_lock);
1516 log_reset("multipathd");
1517 pthread_mutex_unlock(&logq_lock);
1544 condlog(3, "SIGUSR2 received");
1553 sigaddset(&set, SIGHUP);
1554 sigaddset(&set, SIGUSR1);
1555 sigaddset(&set, SIGUSR2);
1556 pthread_sigmask(SIG_BLOCK, &set, NULL);
1558 signal_set(SIGHUP, sighup);
1559 signal_set(SIGUSR1, sigusr1);
1560 signal_set(SIGUSR2, sigusr2);
1561 signal_set(SIGINT, sigend);
1562 signal_set(SIGTERM, sigend);
1563 signal(SIGPIPE, SIG_IGN);
1570 static struct sched_param sched_param = {
1571 .sched_priority = 99
1574 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1577 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1584 #ifdef OOM_SCORE_ADJ_MIN
1586 char *file = "/proc/self/oom_score_adj";
1587 int score = OOM_SCORE_ADJ_MIN;
1590 char *file = "/proc/self/oom_adj";
1591 int score = OOM_ADJUST_MIN;
1597 envp = getenv("OOMScoreAdjust");
1599 condlog(3, "Using systemd provided OOMScoreAdjust");
1603 if (stat(file, &st) == 0){
1604 fp = fopen(file, "w");
1606 condlog(0, "couldn't fopen %s : %s", file,
1610 fprintf(fp, "%i", score);
1614 if (errno != ENOENT) {
1615 condlog(0, "couldn't stat %s : %s", file,
1619 #ifdef OOM_ADJUST_MIN
1620 file = "/proc/self/oom_adj";
1621 score = OOM_ADJUST_MIN;
1626 condlog(0, "couldn't adjust oom score");
/*
 * Daemon main body (runs in the forked child): set up logging and
 * thread attributes, load the configuration, initialize checkers and
 * prioritizers, apply fd limits (systemd's LimitNOFILE wins over
 * conf->max_fds), create the global vectors, honor WATCHDOG_USEC,
 * spawn the uevent listener / CLI listener / checker / uevent
 * dispatcher threads, run the initial configure(), then block on
 * exit_sem until shutdown is requested and tear everything down in
 * reverse. NOTE(review): this listing is heavily elided — goto
 * labels, failure exits and several statements are missing.
 */
1630 child (void * param)
1632 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1633 pthread_attr_t log_attr, misc_attr, uevent_attr;
1634 struct vectors * vecs;
1635 struct multipath * mpp;
1638 unsigned long checkint;
1643 mlockall(MCL_CURRENT | MCL_FUTURE);
1644 sem_init(&exit_sem, 0, 0);
1649 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1650 setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1651 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1654 setup_thread_attr(&log_attr, 64 * 1024, 0);
1655 log_thread_start(&log_attr);
1656 pthread_attr_destroy(&log_attr);
1659 running_state = DAEMON_START;
1662 sd_notify(0, "STATUS=startup");
1664 condlog(2, "--------start up--------");
1665 condlog(2, "read " DEFAULT_CONFIGFILE);
1667 if (load_config(DEFAULT_CONFIGFILE, udev))
1670 if (init_checkers()) {
1671 condlog(0, "failed to initialize checkers");
1675 condlog(0, "failed to initialize prioritizers");
1679 setlogmask(LOG_UPTO(conf->verbosity + 3));
 /* fd limit: a systemd-provided LimitNOFILE takes precedence */
1681 envp = getenv("LimitNOFILE");
1684 condlog(2,"Using systemd provided open fds limit of %s", envp);
1685 } else if (conf->max_fds) {
1686 struct rlimit fd_limit;
1688 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1689 condlog(0, "can't get open fds limit: %s",
1691 fd_limit.rlim_cur = 0;
1692 fd_limit.rlim_max = 0;
1694 if (fd_limit.rlim_cur < conf->max_fds) {
1695 fd_limit.rlim_cur = conf->max_fds;
1696 if (fd_limit.rlim_max < conf->max_fds)
1697 fd_limit.rlim_max = conf->max_fds;
1698 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1699 condlog(0, "can't set open fds limit to "
1701 fd_limit.rlim_cur, fd_limit.rlim_max,
1704 condlog(3, "set open fds limit to %lu/%lu",
1705 fd_limit.rlim_cur, fd_limit.rlim_max);
1711 vecs = gvecs = init_vecs();
1719 udev_set_sync_support(0);
 /* derive check intervals from the systemd watchdog period */
1721 envp = getenv("WATCHDOG_USEC");
1722 if (envp && sscanf(envp, "%lu", &checkint) == 1) {
1723 /* Value is in microseconds */
1724 conf->max_checkint = checkint / 1000000;
1725 /* Rescale checkint */
1726 if (conf->checkint > conf->max_checkint)
1727 conf->checkint = conf->max_checkint;
1729 conf->checkint = conf->max_checkint / 4;
1730 condlog(3, "enabling watchdog, interval %d max %d",
1731 conf->checkint, conf->max_checkint);
1732 conf->watchdog = conf->checkint;
1736 * Start uevent listener early to catch events
1738 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1739 condlog(0, "failed to create uevent thread: %d", rc);
1742 pthread_attr_destroy(&uevent_attr);
1743 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1744 condlog(0, "failed to create cli listener: %d", rc);
1748 * fetch and configure both paths and multipaths
1751 sd_notify(0, "STATUS=configure");
1753 running_state = DAEMON_CONFIGURE;
1756 if (configure(vecs, 1)) {
1758 condlog(0, "failure during configuration");
1766 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1767 condlog(0,"failed to create checker loop thread: %d", rc);
1770 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1771 condlog(0, "failed to create uevent dispatcher: %d", rc);
1774 pthread_attr_destroy(&misc_attr);
1776 /* Startup complete, create logfile */
1777 pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1778 /* Ignore errors, we can live without */
1780 running_state = DAEMON_RUNNING;
1782 sd_notify(0, "READY=1\nSTATUS=running");
 /* park until a signal handler posts exit_sem */
1788 while(sem_wait(&exit_sem) != 0); /* Do nothing */
1791 sd_notify(0, "STATUS=shutdown");
1793 running_state = DAEMON_SHUTDOWN;
1795 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1796 vector_foreach_slot(vecs->mpvec, mpp, i)
1797 dm_queue_if_no_path(mpp->alias, 0);
1798 remove_maps_and_stop_waiters(vecs);
1801 pthread_cancel(check_thr);
1802 pthread_cancel(uevent_thr);
1803 pthread_cancel(uxlsnr_thr);
1804 pthread_cancel(uevq_thr);
1807 free_pathvec(vecs->pathvec, FREE_PATHS);
1808 vecs->pathvec = NULL;
1810 /* Now all the waitevent threads will start rushing in. */
1811 while (vecs->lock.depth > 0) {
1812 sleep (1); /* This is weak. */
1813 condlog(3, "Have %d wait event checkers threads to de-alloc,"
1814 " waiting...", vecs->lock.depth);
1816 pthread_mutex_destroy(vecs->lock.mutex);
1817 FREE(vecs->lock.mutex);
1818 vecs->lock.depth = 0;
1819 vecs->lock.mutex = NULL;
1829 /* We're done here */
1831 condlog(3, "unlink pidfile");
1832 unlink(DEFAULT_PIDFILE);
1835 condlog(2, "--------shut down-------");
1841 * Freeing config must be done after condlog() and dm_lib_exit(),
1842 * because logging functions like dlog() and dm_write_log()
1843 * reference the config.
1850 dbg_free_final(NULL);
1854 sd_notify(0, "ERRNO=0");
1860 sd_notify(0, "ERRNO=1");
1871 if( (pid = fork()) < 0){
1872 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1880 if ( (pid = fork()) < 0)
1881 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1886 fprintf(stderr, "cannot chdir to '/', continuing\n");
1888 dev_null_fd = open("/dev/null", O_RDWR);
1889 if (dev_null_fd < 0){
1890 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1895 close(STDIN_FILENO);
1896 if (dup(dev_null_fd) < 0) {
1897 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1901 close(STDOUT_FILENO);
1902 if (dup(dev_null_fd) < 0) {
1903 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1907 close(STDERR_FILENO);
1908 if (dup(dev_null_fd) < 0) {
1909 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1914 daemon_pid = getpid();
1919 main (int argc, char *argv[])
1921 extern char *optarg;
1927 running_state = DAEMON_INIT;
1930 if (getuid() != 0) {
1931 fprintf(stderr, "need to be root\n");
1935 /* make sure we don't lock any path */
1937 fprintf(stderr, "can't chdir to root directory : %s\n",
1939 umask(umask(077) | 022);
1941 conf = alloc_config();
1946 while ((arg = getopt(argc, argv, ":dsv:k::")) != EOF ) {
1950 //debug=1; /* ### comment me out ### */
1953 if (sizeof(optarg) > sizeof(char *) ||
1954 !isdigit(optarg[0]))
1957 conf->verbosity = atoi(optarg);
1969 if (optind < argc) {
1974 while (optind < argc) {
1975 if (strchr(argv[optind], ' '))
1976 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1978 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1981 c += snprintf(c, s + CMDSIZE - c, "\n");
1999 return (child(NULL));
2002 void * mpath_pr_event_handler_fn (void * pathp )
2004 struct multipath * mpp;
2005 int i,j, ret, isFound;
2006 struct path * pp = (struct path *)pathp;
2007 unsigned char *keyp;
2009 struct prout_param_descriptor *param;
2010 struct prin_resp *resp;
2014 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2016 condlog(0,"%s Alloc failed for prin response", pp->dev);
2020 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2021 if (ret != MPATH_PR_SUCCESS )
2023 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2027 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2028 resp->prin_descriptor.prin_readkeys.additional_length );
2030 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2032 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2033 ret = MPATH_PR_SUCCESS;
2037 keyp = (unsigned char *)mpp->reservation_key;
2038 for (j = 0; j < 8; ++j) {
2044 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
2047 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2049 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
2050 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2051 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2053 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2060 condlog(0, "%s: Either device not registered or ", pp->dev);
2061 condlog(0, "host is not authorised for registration. Skip path");
2062 ret = MPATH_PR_OTHER;
2066 param= malloc(sizeof(struct prout_param_descriptor));
2067 memset(param, 0 , sizeof(struct prout_param_descriptor));
2069 for (j = 7; j >= 0; --j) {
2070 param->sa_key[j] = (prkey & 0xff);
2073 param->num_transportid = 0;
2075 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
2077 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2078 if (ret != MPATH_PR_SUCCESS )
2080 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2090 int mpath_pr_event_handle(struct path *pp)
2094 pthread_attr_t attr;
2095 struct multipath * mpp;
2099 if (!mpp->reservation_key)
2102 pthread_attr_init(&attr);
2103 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2105 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2107 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2110 pthread_attr_destroy(&attr);
2111 rc = pthread_join(thread, NULL);