2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
20 #include <semaphore.h>
21 #include <mpath_persist.h>
39 #include <blacklist.h>
40 #include <structs_vec.h>
42 #include <devmapper.h>
45 #include <discovery.h>
49 #include <switchgroup.h>
51 #include <configure.h>
53 #include <pgpolicies.h>
62 #include "cli_handlers.h"
/*
 * File-scope definitions (fragmentary listing — original line numbers show
 * gaps, so parts of these definitions are missing from this view).
 */
67 #define FILE_NAME_SIZE 256
/*
 * LOG_MSG: log a path-state message; emits "path offline" when the path is
 * offline, otherwise the checker-supplied message `b` at verbosity `a`.
 * NOTE(review): the selecting condition between the two condlog() calls is
 * not visible in this listing — confirm against the full source.
 */
70 #define LOG_MSG(a, b) \
73 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
75 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
/* Parameter bundle used when raising multipath events (body partly elided). */
78 struct mpath_event_param
81 struct multipath *mpp;
/* Allocation length used for persistent-reservation buffers (presumably). */
84 unsigned int mpath_mx_alloc_len;
/* Current daemon lifecycle state (DAEMON_INIT/START/CONFIGURE/RUNNING/SHUTDOWN). */
87 enum daemon_status running_state;
/* Posted by signal/cli shutdown paths; child() blocks on it before teardown. */
90 static sem_t exit_sem;
92 * global copy of vecs for use in sig handlers
94 struct vectors * gvecs;
/*
 * need_switch_pathgroup: decide whether mpp should fail back to a different
 * path group. Refreshes per-path priorities (when `refresh` is set —
 * the guard line is elided in this listing), recomputes the best group, and
 * compares it with the next group the kernel will use. Returns nonzero when
 * a switch is needed; returns 0 for NULL maps or manual failback.
 */
99 need_switch_pathgroup (struct multipath * mpp, int refresh)
101 struct pathgroup * pgp;
105 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
109 * Refresh path priority values
112 vector_foreach_slot (mpp->pg, pgp, i)
113 vector_foreach_slot (pgp->paths, pp, j)
114 pathinfo(pp, conf->hwtable, DI_PRIO);
/* Recompute which group has the best aggregate priority. */
116 mpp->bestpg = select_path_group(mpp);
118 if (mpp->bestpg != mpp->nextpg)
/*
 * switch_pathgroup: tell device-mapper to activate mpp->bestpg for this map
 * and bump the switch-group statistics counter.
 */
125 switch_pathgroup (struct multipath * mpp)
127 mpp->stat_switchgroup++;
128 dm_switchgroup(mpp->alias, mpp->bestpg);
129 condlog(2, "%s: switch to path group #%i",
130 mpp->alias, mpp->bestpg);
/*
 * coalesce_maps: reconcile the old map vector (vecs->mpvec) with the newly
 * built one (nmpv). Maps no longer allowed by the configuration are flushed
 * from device-mapper; a map that cannot be flushed (e.g. still open) is kept
 * by moving it into nmpv. When reassign_maps is enabled, surviving maps get
 * their dm devices reassigned instead.
 */
134 coalesce_maps(struct vectors *vecs, vector nmpv)
136 struct multipath * ompp;
137 vector ompv = vecs->mpvec;
141 vector_foreach_slot (ompv, ompp, i) {
142 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
144 * remove all current maps not allowed by the
145 * current configuration
147 if (dm_flush_map(ompp->alias)) {
148 condlog(0, "%s: unable to flush devmap",
151 * may be just because the device is open
/* Flush failed: keep the map alive by adopting it into the new vector. */
153 if (!vector_alloc_slot(nmpv))
156 vector_set_slot(nmpv, ompp);
157 setup_multipath(vecs, ompp);
159 if ((j = find_slot(ompv, (void *)ompp)) != -1)
160 vector_del_slot(ompv, j);
166 condlog(2, "%s devmap removed", ompp->alias);
168 } else if (conf->reassign_maps) {
169 condlog(3, "%s: Reassign existing device-mapper"
170 " devices", ompp->alias);
171 dm_reassign(ompp->alias);
/*
 * sync_map_state: push the checker's view of each path onto the kernel map —
 * reinstate paths the checker sees up/ghost but dm has failed, and fail paths
 * the checker sees down/shaky but dm has active. Unchecked/wild paths are
 * skipped (their state is unknown).
 */
178 sync_map_state(struct multipath *mpp)
180 struct pathgroup *pgp;
187 vector_foreach_slot (mpp->pg, pgp, i){
188 vector_foreach_slot (pgp->paths, pp, j){
189 if (pp->state == PATH_UNCHECKED ||
190 pp->state == PATH_WILD)
192 if ((pp->dmstate == PSTATE_FAILED ||
193 pp->dmstate == PSTATE_UNDEF) &&
194 (pp->state == PATH_UP || pp->state == PATH_GHOST))
195 dm_reinstate_path(mpp->alias, pp->dev_t);
196 else if ((pp->dmstate == PSTATE_ACTIVE ||
197 pp->dmstate == PSTATE_UNDEF) &&
198 (pp->state == PATH_DOWN ||
199 pp->state == PATH_SHAKY))
200 dm_fail_path(mpp->alias, pp->dev_t);
/* sync_maps_state: apply sync_map_state() to every map in mpvec. */
206 sync_maps_state(vector mpvec)
209 struct multipath *mpp;
211 vector_foreach_slot (mpvec, mpp, i)
/*
 * flush_map: remove the dm map for mpp, then orphan its paths and drop the
 * multipath structure (stopping its waiter thread). A dm_flush_map() failure
 * may simply mean the map was already removed (e.g. by dmsetup).
 */
216 flush_map(struct multipath * mpp, struct vectors * vecs)
219 * clear references to this map before flushing so we can ignore
220 * the spurious uevent we may generate with the dm_flush_map call below
222 if (dm_flush_map(mpp->alias)) {
224 * May not really be an error -- if the map was already flushed
225 * from the device mapper by dmsetup(8) for instance.
227 condlog(0, "%s: can't flush", mpp->alias);
232 condlog(2, "%s: devmap removed", mpp->alias);
235 orphan_paths(vecs->pathvec, mpp);
236 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_add_map: uevent wrapper for map addition. Resolves the map alias from
 * DM_NAME in the uevent, falling back to dm_mapname(major, minor) when
 * DM_NAME is absent, then delegates to ev_add_map().
 */
242 uev_add_map (struct uevent * uev, struct vectors * vecs)
245 int major = -1, minor = -1, rc;
247 condlog(3, "%s: add map (uevent)", uev->kernel);
248 alias = uevent_get_dm_name(uev);
250 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
251 major = uevent_get_major(uev);
252 minor = uevent_get_minor(uev);
253 alias = dm_mapname(major, minor);
255 condlog(2, "%s: mapname not found for %d:%d",
256 uev->kernel, major, minor);
260 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * ev_add_map: register a dm map named `alias` (device node `dev`). Ignores
 * non-multipath dm devices. If the map is already known, optionally reassigns
 * its dm devices. Otherwise registers it from the existing dm table, or — if
 * the map is not yet present in dm — creates it by coalescing paths matching
 * the device's reference wwid.
 */
266 ev_add_map (char * dev, char * alias, struct vectors * vecs)
269 struct multipath * mpp;
273 map_present = dm_map_present(alias);
275 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
276 condlog(4, "%s: not a multipath map", alias);
/* Already tracked: nothing to add (we generated this uevent ourselves). */
280 mpp = find_mp_by_alias(vecs->mpvec, alias);
284 * Not really an error -- we generate our own uevent
285 * if we create a multipath mapped device as a result
288 if (conf->reassign_maps) {
289 condlog(3, "%s: Reassign existing device-mapper devices",
295 condlog(2, "%s: adding map", alias);
298 * now we can register the map
300 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
302 condlog(2, "%s: devmap %s registered", alias, dev);
/* Map absent from dm: build it from paths sharing this device's wwid. */
305 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
308 r = coalesce_paths(vecs, NULL, refwwid, 0);
313 condlog(2, "%s: devmap %s added", alias, dev);
315 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
317 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * uev_remove_map: uevent wrapper for map removal. Looks the map up by dm
 * minor number, cross-checks its alias against DM_NAME (skipping on
 * mismatch), then orphans paths and removes the map structure. The dm device
 * itself is already gone — only internal state is torn down here.
 */
324 uev_remove_map (struct uevent * uev, struct vectors * vecs)
328 struct multipath *mpp;
330 condlog(2, "%s: remove map (uevent)", uev->kernel);
331 alias = uevent_get_dm_name(uev);
333 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
336 minor = uevent_get_minor(uev);
337 mpp = find_mp_by_minor(vecs->mpvec, minor);
340 condlog(2, "%s: devmap not registered, can't remove",
/* Alias/minor disagreement: event is for a different (reused) minor. */
344 if (strcmp(mpp->alias, alias)) {
345 condlog(2, "%s: minor number mismatch (map %d, event %d)",
346 mpp->alias, mpp->dmi->minor, minor);
350 orphan_paths(vecs->pathvec, mpp);
351 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * ev_remove_map: CLI-driven map removal. Same minor/alias validation as
 * uev_remove_map(), but actively flushes the map from dm via flush_map().
 */
358 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
360 struct multipath * mpp;
362 mpp = find_mp_by_minor(vecs->mpvec, minor);
365 condlog(2, "%s: devmap not registered, can't remove",
369 if (strcmp(mpp->alias, alias)) {
370 condlog(2, "%s: minor number mismatch (map %d, event %d)",
371 mpp->alias, mpp->dmi->minor, minor);
374 return flush_map(mpp, vecs);
/*
 * uev_add_path: uevent wrapper for path addition. Rejects relative device
 * names. If the path is already in pathvec with an empty wwid, re-runs
 * pathinfo on it (removing it if now blacklisted); otherwise gathers and
 * stores fresh path info. On success, hands off to ev_add_path().
 */
378 uev_add_path (struct uevent *uev, struct vectors * vecs)
383 condlog(2, "%s: add path (uevent)", uev->kernel);
384 if (strstr(uev->kernel, "..") != NULL) {
386 * Don't allow relative device names in the pathvec
388 condlog(0, "%s: path name is invalid", uev->kernel);
392 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
394 condlog(0, "%s: spurious uevent, path already in pathvec",
/* Known path but wwid never resolved: refresh its udev ref and re-probe. */
398 if (!strlen(pp->wwid)) {
399 udev_device_unref(pp->udev);
400 pp->udev = udev_device_ref(uev->udev);
401 ret = pathinfo(pp, conf->hwtable,
402 DI_ALL | DI_BLACKLIST);
/* Blacklisted on re-probe (presumably): drop it from pathvec. */
404 i = find_slot(vecs->pathvec, (void *)pp);
406 vector_del_slot(vecs->pathvec, i);
409 } else if (ret == 1) {
410 condlog(0, "%s: failed to reinitialize path",
417 * get path vital state
419 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
420 uev->udev, DI_ALL, &pp);
424 condlog(0, "%s: failed to store path info",
428 pp->checkint = conf->checkint;
431 return ev_add_path(pp, vecs);
/*
 * ev_add_path: integrate a discovered path into multipath state. Requires a
 * resolved wwid. If a map for the wwid exists, the path is adopted into it
 * (rejecting size mismatches) and the map is reloaded; otherwise a new map is
 * created. Afterwards the dm table is (re)pushed via setup_map()/domap(),
 * kernel state is re-synced with setup_multipath(), and a dm-event waiter
 * thread is started for newly created maps. On failure the map is removed
 * but the path is generally left in pathvec (see `goto fail` comments).
 */
440 ev_add_path (struct path * pp, struct vectors * vecs)
442 struct multipath * mpp;
443 char empty_buff[WWID_SIZE] = {0};
444 char params[PARAMS_SIZE] = {0};
446 int start_waiter = 0;
449 * need path UID to go any further
451 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
452 condlog(0, "%s: failed to get path uid", pp->dev);
453 goto fail; /* leave path added to pathvec */
455 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* Existing map: refuse zero-sized paths or size mismatches with the map. */
458 if ((!pp->size) || (mpp->size != pp->size)) {
460 condlog(0, "%s: failed to add new path %s, "
462 mpp->alias, pp->dev);
464 condlog(0, "%s: failed to add new path %s, "
465 "device size mismatch",
466 mpp->alias, pp->dev);
467 int i = find_slot(vecs->pathvec, (void *)pp);
469 vector_del_slot(vecs->pathvec, i);
474 condlog(4,"%s: adopting all paths for path %s",
475 mpp->alias, pp->dev);
476 if (adopt_paths(vecs->pathvec, mpp, 1))
477 goto fail; /* leave path added to pathvec */
479 verify_paths(mpp, vecs, NULL);
480 mpp->flush_on_last_del = FLUSH_UNDEF;
481 mpp->action = ACT_RELOAD;
/* No existing map: a zero-size path cannot seed a new one. */
485 condlog(0, "%s: failed to create new map,"
486 " device size is 0 ", pp->dev);
487 int i = find_slot(vecs->pathvec, (void *)pp);
489 vector_del_slot(vecs->pathvec, i);
494 condlog(4,"%s: creating new map", pp->dev);
495 if ((mpp = add_map_with_path(vecs, pp, 1))) {
496 mpp->action = ACT_CREATE;
498 * We don't depend on ACT_CREATE, as domap will
499 * set it to ACT_NOTHING when complete.
504 goto fail; /* leave path added to pathvec */
507 /* persistent reseravtion check*/
508 mpath_pr_event_handle(pp);
511 * push the map to the device-mapper
513 if (setup_map(mpp, params, PARAMS_SIZE)) {
514 condlog(0, "%s: failed to setup map for addition of new "
515 "path %s", mpp->alias, pp->dev);
519 * reload the map for the multipath mapped device
521 if (domap(mpp, params) <= 0) {
522 condlog(0, "%s: failed in domap for addition of new "
523 "path %s", mpp->alias, pp->dev);
525 * deal with asynchronous uevents :((
/* Reload can race a pending uevent; retry a bounded number of times. */
527 if (mpp->action == ACT_RELOAD && retries-- > 0) {
528 condlog(0, "%s: uev_add_path sleep", mpp->alias);
530 update_mpp_paths(mpp, vecs->pathvec);
533 else if (mpp->action == ACT_RELOAD)
534 condlog(0, "%s: giving up reload", mpp->alias);
541 * update our state from kernel regardless of create or reload
543 if (setup_multipath(vecs, mpp))
544 goto fail; /* if setup_multipath fails, it removes the map */
548 if ((mpp->action == ACT_CREATE ||
549 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
550 start_waiter_thread(mpp, vecs))
554 condlog(2, "%s [%s]: path added to devmap %s",
555 pp->dev, pp->dev_t, mpp->alias);
562 remove_map(mpp, vecs, 1);
/*
 * uev_remove_path: uevent wrapper for path removal. Looks the path up by
 * kernel name; a missing path is not an error (it may have been purged
 * earlier). Delegates to ev_remove_path().
 */
569 uev_remove_path (struct uevent *uev, struct vectors * vecs)
573 condlog(2, "%s: remove path (uevent)", uev->kernel);
574 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
577 /* Not an error; path might have been purged earlier */
578 condlog(0, "%s: path already removed", uev->kernel);
582 return ev_remove_path(pp, vecs);
/*
 * ev_remove_path: drop a path from its map and from pathvec. If this was the
 * last path in the map, the map itself is flushed (disabling queueing first
 * when flush_on_last_del is enabled, so the flush can succeed); otherwise the
 * map is rebuilt via setup_map()/domap() without the path and re-synced with
 * the kernel. Orphaned paths (pp->mpp == NULL) are simply removed.
 */
586 ev_remove_path (struct path *pp, struct vectors * vecs)
588 struct multipath * mpp;
590 char params[PARAMS_SIZE] = {0};
593 * avoid referring to the map of an orphaned path
595 if ((mpp = pp->mpp)) {
597 * transform the mp->pg vector of vectors of paths
598 * into a mp->params string to feed the device-mapper
600 if (update_mpp_paths(mpp, vecs->pathvec)) {
601 condlog(0, "%s: failed to update paths",
605 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
606 vector_del_slot(mpp->paths, i);
609 * remove the map IFF removing the last path
611 if (VECTOR_SIZE(mpp->paths) == 0) {
612 char alias[WWID_SIZE];
615 * flush_map will fail if the device is open
/* NOTE(review): strncpy may leave alias unterminated if mpp->alias is
 * WWID_SIZE bytes or longer — confirm against the full source. */
617 strncpy(alias, mpp->alias, WWID_SIZE);
618 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
619 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
621 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
622 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
623 dm_queue_if_no_path(mpp->alias, 0);
625 if (!flush_map(mpp, vecs)) {
626 condlog(2, "%s: removed map after"
627 " removing all paths",
633 * Not an error, continue
637 if (setup_map(mpp, params, PARAMS_SIZE)) {
638 condlog(0, "%s: failed to setup map for"
639 " removal of path %s", mpp->alias, pp->dev);
645 mpp->action = ACT_RELOAD;
646 if (domap(mpp, params) <= 0) {
647 condlog(0, "%s: failed in domap for "
648 "removal of path %s",
649 mpp->alias, pp->dev);
653 * update our state from kernel
655 if (setup_multipath(vecs, mpp)) {
660 condlog(2, "%s [%s]: path removed from map %s",
661 pp->dev, pp->dev_t, mpp->alias);
/* Finally drop the path structure itself from pathvec. */
666 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
667 vector_del_slot(vecs->pathvec, i);
674 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_update_path: handle a "change" uevent on a path. Currently acts on a
 * DISK_RO change: the owning map is reloaded so dm picks up the new
 * read-only state.
 */
679 uev_update_path (struct uevent *uev, struct vectors * vecs)
683 ro = uevent_get_disk_ro(uev);
688 condlog(2, "%s: update path write_protect to '%d' (uevent)",
690 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
692 condlog(0, "%s: spurious uevent, path not found",
697 retval = reload_map(vecs, pp->mpp, 0);
699 condlog(2, "%s: map %s reloaded (retval %d)",
700 uev->kernel, pp->mpp->alias, retval);
/*
 * map_discovery: populate vecs->mpvec with existing dm maps via
 * dm_get_maps(), then initialize each one with setup_multipath().
 */
709 map_discovery (struct vectors * vecs)
711 struct multipath * mpp;
714 if (dm_get_maps(vecs->mpvec))
717 vector_foreach_slot (vecs->mpvec, mpp, i)
718 if (setup_multipath(vecs, mpp))
/*
 * uxsock_trigger: callback for CLI commands arriving on the unix socket.
 * Takes the vecs lock (with a cancellation-safe cleanup handler), runs
 * parse_cmd(), and normalizes the reply: "fail\n" on error, "ok\n" on
 * success with no output, and the command's own reply otherwise.
 */
725 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
727 struct vectors * vecs;
732 vecs = (struct vectors *)trigger_data;
734 pthread_cleanup_push(cleanup_lock, &vecs->lock);
736 pthread_testcancel();
738 r = parse_cmd(str, reply, len, vecs);
741 *reply = STRDUP("fail\n");
742 *len = strlen(*reply) + 1;
745 else if (!r && *len == 0) {
746 *reply = STRDUP("ok\n");
747 *len = strlen(*reply) + 1;
750 /* else if (r < 0) leave *reply alone */
752 lock_cleanup_pop(vecs->lock);
/*
 * uev_discard: filter uevents by devpath — keep only whole block devices.
 * Events without "/block/" in the path, or with a partition component after
 * the device name, are discarded.
 */
757 uev_discard(char * devpath)
763 * keep only block devices, discard partitions
765 tmp = strstr(devpath, "/block/");
767 condlog(4, "no /block/ in '%s'", devpath);
/* Second sscanf matching means a sub-component (partition) follows. */
770 if (sscanf(tmp, "/block/%10s", a) != 1 ||
771 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
772 condlog(4, "discard event on %s", devpath);
/*
 * uev_trigger: top-level uevent dispatcher. Under the vecs lock, routes
 * "dm-*" change/remove events to the map handlers, and add/remove/change
 * events on other (non-blacklisted) devnodes to the path handlers. dm "add"
 * events are deliberately ignored: tables are not fully set up yet.
 */
779 uev_trigger (struct uevent * uev, void * trigger_data)
782 struct vectors * vecs;
784 vecs = (struct vectors *)trigger_data;
786 if (uev_discard(uev->devpath))
789 pthread_cleanup_push(cleanup_lock, &vecs->lock);
791 pthread_testcancel();
795 * Add events are ignored here as the tables
796 * are not fully initialised then.
798 if (!strncmp(uev->kernel, "dm-", 3)) {
799 if (!strncmp(uev->action, "change", 6)) {
800 r = uev_add_map(uev, vecs);
803 if (!strncmp(uev->action, "remove", 6)) {
804 r = uev_remove_map(uev, vecs);
811 * path add/remove event
/* Skip devnodes excluded by the blacklist/exceptions configuration. */
813 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
817 if (!strncmp(uev->action, "add", 3)) {
818 r = uev_add_path(uev, vecs);
821 if (!strncmp(uev->action, "remove", 6)) {
822 r = uev_remove_path(uev, vecs);
825 if (!strncmp(uev->action, "change", 6)) {
826 r = uev_update_path(uev, vecs);
831 lock_cleanup_pop(vecs->lock);
/*
 * ueventloop: thread entry — blocks in uevent_listen() receiving kernel
 * uevents. The second fragment below (original lines 847-848) belongs to a
 * separate dispatcher thread (presumably uevqloop) that drains the queued
 * events through uev_trigger(); the intervening definition lines are elided
 * in this listing.
 */
836 ueventloop (void * ap)
838 if (uevent_listen(udev))
839 condlog(0, "error starting uevent listener");
847 if (uevent_dispatch(&uev_trigger, ap))
848 condlog(0, "error starting uevent dispatcher");
/*
 * uxlsnrloop: thread entry for the CLI listener. Registers every interactive
 * command handler with the keyword parser, then blocks in uxsock_listen()
 * serving commands through uxsock_trigger().
 */
853 uxlsnrloop (void * ap)
858 set_handler_callback(LIST+PATHS, cli_list_paths);
859 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
860 set_handler_callback(LIST+MAPS, cli_list_maps);
861 set_handler_callback(LIST+STATUS, cli_list_status);
862 set_handler_callback(LIST+DAEMON, cli_list_daemon);
863 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
864 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
865 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
866 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
867 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
868 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
869 set_handler_callback(LIST+CONFIG, cli_list_config);
870 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
871 set_handler_callback(LIST+DEVICES, cli_list_devices);
872 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
873 set_handler_callback(ADD+PATH, cli_add_path);
874 set_handler_callback(DEL+PATH, cli_del_path);
875 set_handler_callback(ADD+MAP, cli_add_map);
876 set_handler_callback(DEL+MAP, cli_del_map);
877 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
878 set_handler_callback(RECONFIGURE, cli_reconfigure);
879 set_handler_callback(SUSPEND+MAP, cli_suspend);
880 set_handler_callback(RESUME+MAP, cli_resume);
881 set_handler_callback(RESIZE+MAP, cli_resize);
882 set_handler_callback(RELOAD+MAP, cli_reload);
883 set_handler_callback(RESET+MAP, cli_reassign);
884 set_handler_callback(REINSTATE+PATH, cli_reinstate);
885 set_handler_callback(FAIL+PATH, cli_fail);
886 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
887 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
888 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
889 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
890 set_handler_callback(QUIT, cli_quit);
891 set_handler_callback(SHUTDOWN, cli_shutdown);
892 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
893 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
894 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
895 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
896 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
899 uxsock_listen(&uxsock_trigger, ap);
/*
 * Fragment of a daemon-state dispatch (enclosing function name elided in
 * this listing — presumably a status-reporting or state-transition helper).
 */
913 switch (running_state) {
918 case DAEMON_CONFIGURE:
922 case DAEMON_SHUTDOWN:
/*
 * fail_path: mark a path failed in device-mapper and update the map's
 * queueing bookkeeping; del_active controls whether the active-path count
 * is decremented (presumably — the guard line is elided here).
 */
929 fail_path (struct path * pp, int del_active)
934 condlog(2, "checker failed path %s in map %s",
935 pp->dev_t, pp->mpp->alias);
937 dm_fail_path(pp->mpp->alias, pp->dev_t);
939 update_queue_mode_del_path(pp->mpp);
943 * caller must have locked the path list before calling that function
/*
 * reinstate_path: tell device-mapper to reinstate the path; add_active
 * controls whether the active-path count is incremented (presumably —
 * the guard line is elided here).
 */
946 reinstate_path (struct path * pp, int add_active)
951 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
952 condlog(0, "%s: reinstate failed", pp->dev_t);
954 condlog(2, "%s: reinstated", pp->dev_t);
956 update_queue_mode_add_path(pp->mpp);
/*
 * enable_group: re-enable the path group containing pp if dm currently has
 * it disabled. Safe no-op when pgindex is unset (path added via uevent and
 * not yet placed into a group by a map reload).
 */
961 enable_group(struct path * pp)
963 struct pathgroup * pgp;
966 * if path is added through uev_add_path, pgindex can be unset.
967 * next update_strings() will set it, upon map reload event.
969 * we can safely return here, because upon map reload, all
970 * PG will be enabled.
972 if (!pp->mpp->pg || !pp->pgindex)
/* pgindex is 1-based; the pg vector is 0-based. */
975 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
977 if (pgp->status == PGSTATE_DISABLED) {
978 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
979 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * mpvec_garbage_collector: drop any tracked map whose dm device no longer
 * exists, stopping its waiter thread.
 */
984 mpvec_garbage_collector (struct vectors * vecs)
986 struct multipath * mpp;
992 vector_foreach_slot (vecs->mpvec, mpp, i) {
993 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
994 condlog(2, "%s: remove dead map", mpp->alias);
995 remove_map_and_stop_waiter(mpp, vecs, 1);
1001 /* This is called after a path has started working again. It the multipath
1002 * device for this path uses the followover failback type, and this is the
1003 * best pathgroup, and this is the first path in the pathgroup to come back
1004 * up, then switch to this pathgroup */
1006 followover_should_failback(struct path * pp)
1008 struct pathgroup * pgp;
1012 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1013 !pp->mpp->pg || !pp->pgindex ||
1014 pp->pgindex != pp->mpp->bestpg)
1017 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
/* If any sibling path was already up (not down/shaky), pp was not the
 * first path in the group to recover, so do not fail back. */
1018 vector_foreach_slot(pgp->paths, pp1, i) {
1021 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * defered_failback_tick: count down each map's deferred-failback timer;
 * when a timer expires and a better path group is available, switch to it.
 */
1028 defered_failback_tick (vector mpvec)
1030 struct multipath * mpp;
1033 vector_foreach_slot (mpvec, mpp, i) {
1035 * defered failback getting sooner
1037 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1038 mpp->failback_tick--;
1040 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1041 switch_pathgroup(mpp);
/*
 * retry_count_tick: count down each queueing map's no-path retry timer,
 * accumulating queueing-time statistics; when it expires, disable
 * queue_if_no_path so pending I/O fails instead of queueing forever.
 */
1047 retry_count_tick(vector mpvec)
1049 struct multipath *mpp;
1052 vector_foreach_slot (mpvec, mpp, i) {
1053 if (mpp->retry_tick) {
1054 mpp->stat_total_queueing_time++;
1055 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1056 if(--mpp->retry_tick == 0) {
1057 dm_queue_if_no_path(mpp->alias, 0);
1058 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * update_prio: refresh path priorities. With refresh_all set, re-reads the
 * priority of every path in the map and reports whether any changed;
 * otherwise only pp's own priority is re-read and compared.
 */
1064 int update_prio(struct path *pp, int refresh_all)
1068 struct pathgroup * pgp;
1069 int i, j, changed = 0;
1072 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1073 vector_foreach_slot (pgp->paths, pp1, j) {
1074 oldpriority = pp1->priority;
1075 pathinfo(pp1, conf->hwtable, DI_PRIO);
1076 if (pp1->priority != oldpriority)
1082 oldpriority = pp->priority;
1083 pathinfo(pp, conf->hwtable, DI_PRIO);
1085 if (pp->priority == oldpriority)
/*
 * update_path_groups: reload the dm map (optionally refreshing path info),
 * re-sync internal state from the kernel, and push checker path states back
 * to dm via sync_map_state().
 */
1090 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1092 if (reload_map(vecs, mpp, refresh))
1096 if (setup_multipath(vecs, mpp) != 0)
1098 sync_map_state(mpp);
/*
 * check_path: one checker-loop iteration for a single path. Honors the
 * per-path tick countdown, probes the path (path_offline() then get_state()),
 * synchronizes with kernel dm state, and on a state change proactively fails
 * or reinstates the path, adjusts the check interval (exponential backoff up
 * to max_checkint while stable), re-enables disabled groups, refreshes
 * priorities, and schedules or performs path-group failback.
 */
1104 check_path (struct vectors * vecs, struct path * pp)
1107 int new_path_up = 0;
1108 int chkr_new_path_up = 0;
1109 int oldchkrstate = pp->chkrstate;
1114 if (pp->tick && --pp->tick)
1115 return; /* don't check this path yet */
1118 * provision a next check soonest,
1119 * in case we exit abnormaly from here
1121 pp->tick = conf->checkint;
1123 newstate = path_offline(pp);
1124 if (newstate == PATH_UP)
1125 newstate = get_state(pp, 1);
1127 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1128 condlog(2, "%s: unusable path", pp->dev);
1129 pathinfo(pp, conf->hwtable, 0);
1133 * Async IO in flight. Keep the previous path state
1134 * and reschedule as soon as possible
1136 if (newstate == PATH_PENDING) {
1141 * Synchronize with kernel state
1143 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1144 condlog(1, "%s: Could not synchronize with kernel state",
1146 pp->dmstate = PSTATE_UNDEF;
1148 pp->chkrstate = newstate;
1149 if (newstate != pp->state) {
1150 int oldstate = pp->state;
1151 pp->state = newstate;
1152 LOG_MSG(1, checker_message(&pp->checker));
1155 * upon state change, reset the checkint
1156 * to the shortest delay
1158 pp->checkint = conf->checkint;
1160 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1162 * proactively fail path in the DM
1164 if (oldstate == PATH_UP ||
1165 oldstate == PATH_GHOST)
1171 * cancel scheduled failback
1173 pp->mpp->failback_tick = 0;
1175 pp->mpp->stat_path_failures++;
/* Path came (back) up: re-check persistent reservations if in use. */
1179 if(newstate == PATH_UP || newstate == PATH_GHOST){
1180 if ( pp->mpp && pp->mpp->prflag ){
1182 * Check Persistent Reservation.
1184 condlog(2, "%s: checking persistent reservation "
1185 "registration", pp->dev);
1186 mpath_pr_event_handle(pp);
1191 * reinstate this path
1193 if (oldstate != PATH_UP &&
1194 oldstate != PATH_GHOST)
1195 reinstate_path(pp, 1);
1197 reinstate_path(pp, 0);
1201 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1202 chkr_new_path_up = 1;
1205 * if at least one path is up in a group, and
1206 * the group is disabled, re-enable it
1208 if (newstate == PATH_UP)
/* No state change: path stayed up — back off the check interval. */
1211 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1212 if (pp->dmstate == PSTATE_FAILED ||
1213 pp->dmstate == PSTATE_UNDEF) {
1214 /* Clear IO errors */
1215 reinstate_path(pp, 0);
1217 LOG_MSG(4, checker_message(&pp->checker));
1218 if (pp->checkint != conf->max_checkint) {
1220 * double the next check delay.
1221 * max at conf->max_checkint
1223 if (pp->checkint < (conf->max_checkint / 2))
1224 pp->checkint = 2 * pp->checkint;
1226 pp->checkint = conf->max_checkint;
1228 condlog(4, "%s: delay next check %is",
1229 pp->dev_t, pp->checkint);
1231 pp->tick = pp->checkint;
1234 else if (newstate == PATH_DOWN) {
1235 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1236 LOG_MSG(3, checker_message(&pp->checker));
1238 LOG_MSG(2, checker_message(&pp->checker));
1241 pp->state = newstate;
1244 * path prio refreshing
1246 condlog(4, "path prio refresh");
/* Priority change under group_by_prio + immediate failback: regroup now.
 * Otherwise schedule (deferred) or perform (immediate/followover) failback. */
1248 if (update_prio(pp, new_path_up) &&
1249 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1250 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1251 update_path_groups(pp->mpp, vecs, !new_path_up);
1252 else if (need_switch_pathgroup(pp->mpp, 0)) {
1253 if (pp->mpp->pgfailback > 0 &&
1254 (new_path_up || pp->mpp->failback_tick <= 0))
1255 pp->mpp->failback_tick =
1256 pp->mpp->pgfailback + 1;
1257 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1258 (chkr_new_path_up && followover_should_failback(pp)))
1259 switch_pathgroup(pp->mpp);
/*
 * checkerloop: thread entry for the path checker. Locks itself in memory,
 * seeds each path's check interval, then loops forever: under the vecs lock
 * it runs check_path() on every path, ticks the deferred-failback and
 * retry-count timers, and periodically garbage-collects dead maps.
 */
1264 checkerloop (void *ap)
1266 struct vectors *vecs;
1271 mlockall(MCL_CURRENT | MCL_FUTURE);
1272 vecs = (struct vectors *)ap;
1273 condlog(2, "path checkers start up");
1276 * init the path check interval
1278 vector_foreach_slot (vecs->pathvec, pp, i) {
1279 pp->checkint = conf->checkint;
1283 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1285 pthread_testcancel();
1288 if (vecs->pathvec) {
1289 vector_foreach_slot (vecs->pathvec, pp, i) {
1290 check_path(vecs, pp);
1294 defered_failback_tick(vecs->mpvec);
1295 retry_count_tick(vecs->mpvec);
1300 condlog(4, "map garbage collection");
1301 mpvec_garbage_collector(vecs);
1305 lock_cleanup_pop(vecs->lock);
/*
 * configure: full (re)discovery pass. Allocates path/map vectors if needed,
 * discovers paths from sysfs (dropping blacklisted ones) and maps from dm,
 * coalesces paths into a new map set, reconciles it with the old set,
 * swaps in the new map vector, and initializes each map — optionally
 * starting a dm-event waiter thread per map.
 */
1312 configure (struct vectors * vecs, int start_waiters)
1314 struct multipath * mpp;
1319 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1322 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1325 if (!(mpvec = vector_alloc()))
1329 * probe for current path (from sysfs) and map (from dm) sets
1331 path_discovery(vecs->pathvec, conf, DI_ALL);
1333 vector_foreach_slot (vecs->pathvec, pp, i){
1334 if (filter_path(conf, pp) > 0){
1335 vector_del_slot(vecs->pathvec, i);
1340 pp->checkint = conf->checkint;
1342 if (map_discovery(vecs))
1346 * create new set of maps & push changed ones into dm
1348 if (coalesce_paths(vecs, mpvec, NULL, 1))
1352 * may need to remove some maps which are no longer relevant
1353 * e.g., due to blacklist changes in conf file
1355 if (coalesce_maps(vecs, mpvec))
1360 sync_maps_state(mpvec);
1361 vector_foreach_slot(mpvec, mpp, i){
1362 remember_wwid(mpp->wwid);
1367 * purge dm of old maps
1372 * save new set of maps formed by considering current path state
1374 vector_free(vecs->mpvec);
1375 vecs->mpvec = mpvec;
1378 * start dm event waiter threads for these new maps
1380 vector_foreach_slot(vecs->mpvec, mpp, i) {
1381 if (setup_multipath(vecs, mpp))
1384 if (start_waiter_thread(mpp, vecs))
/*
 * reconfigure: tear down all maps/paths built from the old configuration,
 * reload multipath.conf (preserving the old verbosity setting), and run a
 * fresh configure() pass (elided in this listing).
 */
1391 reconfigure (struct vectors * vecs)
1393 struct config * old = conf;
1397 * free old map and path vectors ... they use old conf state
1399 if (VECTOR_SIZE(vecs->mpvec))
1400 remove_maps_and_stop_waiters(vecs);
1402 if (VECTOR_SIZE(vecs->pathvec))
1403 free_pathvec(vecs->pathvec, FREE_PATHS);
1405 vecs->pathvec = NULL;
1408 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1409 conf->verbosity = old->verbosity;
/*
 * init_vecs: allocate and initialize the global vectors structure and its
 * mutex/lock-depth bookkeeping. Returns NULL on allocation failure
 * (cleanup paths elided in this listing).
 */
1419 static struct vectors *
1422 struct vectors * vecs;
1424 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1430 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1432 if (!vecs->lock.mutex)
1435 pthread_mutex_init(vecs->lock.mutex, NULL);
1436 vecs->lock.depth = 0;
1442 condlog(0, "failed to init paths");
/*
 * signal_set: install `func` as the handler for `signo` via sigaction(),
 * returning the previously installed handler (SIG_ERR handling elided).
 */
1447 signal_set(int signo, void (*func) (int))
1450 struct sigaction sig;
1451 struct sigaction osig;
1453 sig.sa_handler = func;
1454 sigemptyset(&sig.sa_mask);
1457 r = sigaction(signo, &sig, &osig);
1462 return (osig.sa_handler);
/*
 * handle_signals: act on flags raised by async signal handlers — run
 * reconfigure() under the gvecs lock when SIGHUP was received while running,
 * and reset the log queue when the log-reset signal was received.
 */
1466 handle_signals(void)
1468 if (reconfig_sig && running_state == DAEMON_RUNNING) {
1469 condlog(2, "reconfigure (signal)");
1470 pthread_cleanup_push(cleanup_lock,
1473 pthread_testcancel();
1475 lock_cleanup_pop(gvecs->lock);
1477 if (log_reset_sig) {
1478 condlog(2, "reset log (signal)");
1479 pthread_mutex_lock(&logq_lock);
1480 log_reset("multipathd");
1481 pthread_mutex_unlock(&logq_lock);
/*
 * Signal setup fragment (enclosing function header elided): block SIGHUP
 * and SIGUSR1 for this thread, install handlers for HUP/USR1/INT/TERM, and
 * ignore SIGPIPE so socket writes fail with EPIPE instead of killing us.
 */
1511 sigaddset(&set, SIGHUP);
1512 sigaddset(&set, SIGUSR1);
1513 pthread_sigmask(SIG_BLOCK, &set, NULL);
1515 signal_set(SIGHUP, sighup);
1516 signal_set(SIGUSR1, sigusr1);
1517 signal_set(SIGINT, sigend);
1518 signal_set(SIGTERM, sigend);
1519 signal(SIGPIPE, SIG_IGN);
/*
 * Scheduler setup fragment (enclosing function header elided): request
 * real-time round-robin scheduling at max priority; warn on failure.
 */
1526 static struct sched_param sched_param = {
1527 .sched_priority = 99
1530 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1533 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * OOM-score adjustment fragment (enclosing function header elided): write
 * the minimum OOM score to /proc/self/oom_score_adj, falling back to the
 * legacy /proc/self/oom_adj interface when the new file does not exist, so
 * the OOM killer never targets the daemon.
 */
1540 #ifdef OOM_SCORE_ADJ_MIN
1542 char *file = "/proc/self/oom_score_adj";
1543 int score = OOM_SCORE_ADJ_MIN;
1546 char *file = "/proc/self/oom_adj";
1547 int score = OOM_ADJUST_MIN;
1553 if (stat(file, &st) == 0){
1554 fp = fopen(file, "w");
1556 condlog(0, "couldn't fopen %s : %s", file,
1560 fprintf(fp, "%i", score);
1564 if (errno != ENOENT) {
1565 condlog(0, "couldn't stat %s : %s", file,
1569 #ifdef OOM_ADJUST_MIN
/* New interface absent: retry with the legacy oom_adj file. */
1570 file = "/proc/self/oom_adj";
1571 score = OOM_ADJUST_MIN;
1576 condlog(0, "couldn't adjust oom score");
/*
 * child: the daemon's main body after daemonization. Initializes logging,
 * config, checkers/prioritizers and fd limits; creates the vecs structure;
 * starts the uevent listener and CLI listener threads; runs the initial
 * configure(); then starts the checker and uevent-dispatch threads, writes
 * the pidfile, and blocks on exit_sem until shutdown is requested. Teardown
 * disables queueing (per config), removes maps, cancels the worker threads,
 * frees paths, waits for waitevent threads to drain the lock depth, and
 * releases logging/config resources last (they are used by condlog()).
 */
1580 child (void * param)
1582 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1583 pthread_attr_t log_attr, misc_attr;
1584 struct vectors * vecs;
1585 struct multipath * mpp;
1589 mlockall(MCL_CURRENT | MCL_FUTURE);
1590 sem_init(&exit_sem, 0, 0);
1595 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1596 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1599 setup_thread_attr(&log_attr, 64 * 1024, 0);
1600 log_thread_start(&log_attr);
1601 pthread_attr_destroy(&log_attr);
1604 running_state = DAEMON_START;
1606 condlog(2, "--------start up--------");
1607 condlog(2, "read " DEFAULT_CONFIGFILE);
1609 if (load_config(DEFAULT_CONFIGFILE, udev))
1612 if (init_checkers()) {
1613 condlog(0, "failed to initialize checkers");
1617 condlog(0, "failed to initialize prioritizers");
1621 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* Raise RLIMIT_NOFILE to the configured max_fds when needed. */
1623 if (conf->max_fds) {
1624 struct rlimit fd_limit;
1626 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1627 condlog(0, "can't get open fds limit: %s",
1629 fd_limit.rlim_cur = 0;
1630 fd_limit.rlim_max = 0;
1632 if (fd_limit.rlim_cur < conf->max_fds) {
1633 fd_limit.rlim_cur = conf->max_fds;
1634 if (fd_limit.rlim_max < conf->max_fds)
1635 fd_limit.rlim_max = conf->max_fds;
1636 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1637 condlog(0, "can't set open fds limit to "
1639 fd_limit.rlim_cur, fd_limit.rlim_max,
1642 condlog(3, "set open fds limit to %lu/%lu",
1643 fd_limit.rlim_cur, fd_limit.rlim_max);
1649 vecs = gvecs = init_vecs();
1657 udev_set_sync_support(0);
1659 * Start uevent listener early to catch events
1661 if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, udev))) {
1662 condlog(0, "failed to create uevent thread: %d", rc);
1665 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1666 condlog(0, "failed to create cli listener: %d", rc);
1670 * fetch and configure both paths and multipaths
1672 running_state = DAEMON_CONFIGURE;
1675 if (configure(vecs, 1)) {
1677 condlog(0, "failure during configuration");
1685 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1686 condlog(0,"failed to create checker loop thread: %d", rc);
1689 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1690 condlog(0, "failed to create uevent dispatcher: %d", rc);
1693 pthread_attr_destroy(&misc_attr);
1695 /* Startup complete, create logfile */
1696 pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1697 /* Ignore errors, we can live without */
1699 running_state = DAEMON_RUNNING;
/* Park here until a shutdown signal or CLI command posts exit_sem. */
1704 while(sem_wait(&exit_sem) != 0); /* Do nothing */
1705 running_state = DAEMON_SHUTDOWN;
1707 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1708 vector_foreach_slot(vecs->mpvec, mpp, i)
1709 dm_queue_if_no_path(mpp->alias, 0);
1710 remove_maps_and_stop_waiters(vecs);
1713 pthread_cancel(check_thr);
1714 pthread_cancel(uevent_thr);
1715 pthread_cancel(uxlsnr_thr);
1716 pthread_cancel(uevq_thr);
1719 free_pathvec(vecs->pathvec, FREE_PATHS);
1720 vecs->pathvec = NULL;
1722 /* Now all the waitevent threads will start rushing in. */
1723 while (vecs->lock.depth > 0) {
1724 sleep (1); /* This is weak. */
1725 condlog(3, "Have %d wait event checkers threads to de-alloc,"
1726 " waiting...", vecs->lock.depth);
1728 pthread_mutex_destroy(vecs->lock.mutex);
1729 FREE(vecs->lock.mutex);
1730 vecs->lock.depth = 0;
1731 vecs->lock.mutex = NULL;
1741 /* We're done here */
1743 condlog(3, "unlink pidfile");
1744 unlink(DEFAULT_PIDFILE);
1747 condlog(2, "--------shut down-------");
1753 * Freeing config must be done after condlog() and dm_lib_exit(),
1754 * because logging functions like dlog() and dm_write_log()
1755 * reference the config.
1762 dbg_free_final(NULL);
/*
 * Daemonization fragment (enclosing function header elided): classic
 * double-fork, chdir to "/", and redirection of stdin/stdout/stderr to
 * /dev/null, finishing by recording the daemon's pid.
 */
1774 if( (pid = fork()) < 0){
1775 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1783 if ( (pid = fork()) < 0)
1784 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1789 fprintf(stderr, "cannot chdir to '/', continuing\n");
1791 dev_null_fd = open("/dev/null", O_RDWR);
1792 if (dev_null_fd < 0){
1793 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* Re-point each standard stream at /dev/null (close then dup). */
1798 close(STDIN_FILENO);
1799 if (dup(dev_null_fd) < 0) {
1800 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1804 close(STDOUT_FILENO);
1805 if (dup(dev_null_fd) < 0) {
1806 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1810 close(STDERR_FILENO);
1811 if (dup(dev_null_fd) < 0) {
1812 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1817 daemon_pid = getpid();
/*
 * main: entry point. Requires root, detaches from any CWD locks, parses
 * options (-d foreground, -v verbosity, -k interactive client mode), and —
 * when extra arguments are present — joins them into a single CLI command
 * string (quoting arguments containing spaces) for client mode. Ends by
 * running the daemon body via child().
 */
1822 main (int argc, char *argv[])
1824 extern char *optarg;
1830 running_state = DAEMON_INIT;
1833 if (getuid() != 0) {
1834 fprintf(stderr, "need to be root\n");
1838 /* make sure we don't lock any path */
1840 fprintf(stderr, "can't chdir to root directory : %s\n",
1842 umask(umask(077) | 022);
1844 conf = alloc_config();
1849 while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1853 //debug=1; /* ### comment me out ### */
1856 if (sizeof(optarg) > sizeof(char *) ||
1857 !isdigit(optarg[0]))
1860 conf->verbosity = atoi(optarg);
/* Remaining argv words become one space-joined CLI command string. */
1869 if (optind < argc) {
1874 while (optind < argc) {
1875 if (strchr(argv[optind], ' '))
1876 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1878 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1881 c += snprintf(c, s + CMDSIZE - c, "\n");
1899 return (child(NULL));
/*
 * mpath_pr_event_handler_fn: thread body that reconciles SCSI persistent
 * reservations for a newly working path. Reads the registered keys
 * (PR IN / READ KEYS), checks whether the map's reservation_key is among
 * them, and if so re-registers the key on this path via PR OUT
 * REGISTER-AND-IGNORE so the path participates in the reservation.
 */
1902 void * mpath_pr_event_handler_fn (void * pathp )
1904 struct multipath * mpp;
1905 int i,j, ret, isFound;
1906 struct path * pp = (struct path *)pathp;
1907 unsigned char *keyp;
1909 struct prout_param_descriptor *param;
1910 struct prin_resp *resp;
1914 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1916 condlog(0,"%s Alloc failed for prin response", pp->dev);
1920 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1921 if (ret != MPATH_PR_SUCCESS )
1923 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1927 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
1928 resp->prin_descriptor.prin_readkeys.additional_length );
1930 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1932 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1933 ret = MPATH_PR_SUCCESS;
/* Build the map's 8-byte reservation key into an integer for logging. */
1937 keyp = (unsigned char *)mpp->reservation_key;
1938 for (j = 0; j < 8; ++j) {
1944 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* Scan the returned key list for our reservation key. */
1947 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1949 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
1950 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1951 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1953 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1960 condlog(0, "%s: Either device not registered or ", pp->dev);
1961 condlog(0, "host is not authorised for registration. Skip path");
1962 ret = MPATH_PR_OTHER;
/* NOTE(review): malloc result is used unchecked below — the NULL check,
 * if any, is elided in this listing. */
1966 param= malloc(sizeof(struct prout_param_descriptor));
1967 memset(param, 0 , sizeof(struct prout_param_descriptor));
1969 for (j = 7; j >= 0; --j) {
1970 param->sa_key[j] = (prkey & 0xff);
1973 param->num_transportid = 0;
1975 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1977 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1978 if (ret != MPATH_PR_SUCCESS )
1980 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
/*
 * mpath_pr_event_handle: spawn mpath_pr_event_handler_fn for this path when
 * the owning map has a reservation_key configured, and join it before
 * returning. NOTE(review): the attr prepared with
 * PTHREAD_CREATE_JOINABLE is not passed to pthread_create (NULL is used) —
 * harmless since joinable is the default, but confirm intent in the full
 * source.
 */
1990 int mpath_pr_event_handle(struct path *pp)
1994 pthread_attr_t attr;
1995 struct multipath * mpp;
1999 if (!mpp->reservation_key)
2002 pthread_attr_init(&attr);
2003 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2005 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2007 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2010 pthread_attr_destroy(&attr);
2011 rc = pthread_join(thread, NULL);