2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
21 #include <systemd/sd-daemon.h>
23 #include <semaphore.h>
24 #include <mpath_persist.h>
42 #include <blacklist.h>
43 #include <structs_vec.h>
45 #include <devmapper.h>
48 #include <discovery.h>
52 #include <switchgroup.h>
54 #include <configure.h>
56 #include <pgpolicies.h>
65 #include "cli_handlers.h"
/* Generic buffer size for file-name strings. */
70 #define FILE_NAME_SIZE 256
/*
 * Log a checker message for path pp at verbosity a: "path offline"
 * when the path is offline, otherwise the checker text b.
 * NOTE(review): the offline test between the two condlog() calls is
 * elided in this excerpt.
 */
73 #define LOG_MSG(a, b) \
76 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
78 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
/* Argument bundle passed to persistent-reservation event handling. */
81 struct mpath_event_param
84 struct multipath *mpp;
/* Maximum transfer length for mpath_persist allocations. */
87 unsigned int mpath_mx_alloc_len;
/* Daemon lifecycle state (DAEMON_INIT/START/CONFIGURE/RUNNING/SHUTDOWN). */
90 enum daemon_status running_state;
/* child() blocks on this semaphore until shutdown is requested. */
93 static sem_t exit_sem;
95 * global copy of vecs for use in sig handlers
97 struct vectors * gvecs;
/*
 * Decide whether mpp should switch to a different path group.
 * Optionally refreshes every path's priority, recomputes the best
 * path group via select_path_group(), and reports whether it differs
 * from the currently scheduled next group (mpp->nextpg).
 * Returns false immediately for NULL maps or manual-failback maps.
 * NOTE(review): the gate on `refresh` before the priority loop is
 * elided in this excerpt.
 */
102 need_switch_pathgroup (struct multipath * mpp, int refresh)
104 struct pathgroup * pgp;
108 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
112 * Refresh path priority values
115 vector_foreach_slot (mpp->pg, pgp, i)
116 vector_foreach_slot (pgp->paths, pp, j)
117 pathinfo(pp, conf->hwtable, DI_PRIO);
119 mpp->bestpg = select_path_group(mpp);
121 if (mpp->bestpg != mpp->nextpg)
/*
 * Switch mpp to its best path group via device-mapper and
 * account the switch in the map's statistics.
 */
128 switch_pathgroup (struct multipath * mpp)
130 mpp->stat_switchgroup++;
131 dm_switchgroup(mpp->alias, mpp->bestpg);
132 condlog(2, "%s: switch to path group #%i",
133 mpp->alias, mpp->bestpg);
/*
 * Reconcile the old map vector (vecs->mpvec) against the newly built
 * map set nmpv: maps no longer allowed by the configuration are
 * flushed from device-mapper; maps that cannot be flushed (e.g. still
 * open) are re-set-up and carried over into nmpv; surviving maps may
 * have their DM devices reassigned when reassign_maps is enabled.
 * NOTE(review): several control-flow lines are elided in this excerpt.
 */
137 coalesce_maps(struct vectors *vecs, vector nmpv)
139 struct multipath * ompp;
140 vector ompv = vecs->mpvec;
143 vector_foreach_slot (ompv, ompp, i) {
144 condlog(3, "%s: coalesce map", ompp->alias);
145 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
147 * remove all current maps not allowed by the
148 * current configuration
150 if (dm_flush_map(ompp->alias)) {
151 condlog(0, "%s: unable to flush devmap",
154 * may be just because the device is open
156 if (setup_multipath(vecs, ompp) != 0) {
160 if (!vector_alloc_slot(nmpv))
163 vector_set_slot(nmpv, ompp);
/* keep the unflushable map: move it from the old vector to the new */
165 vector_del_slot(ompv, i);
170 condlog(2, "%s devmap removed", ompp->alias);
172 } else if (conf->reassign_maps) {
173 condlog(3, "%s: Reassign existing device-mapper"
174 " devices", ompp->alias);
175 dm_reassign(ompp->alias);
/*
 * Push the checker's view of each path state down into the kernel:
 * reinstate paths the checker sees as usable but DM has failed, and
 * fail paths the checker sees as down/shaky but DM has active.
 * Paths in WILD/UNCHECKED state are skipped (state unknown).
 */
182 sync_map_state(struct multipath *mpp)
184 struct pathgroup *pgp;
191 vector_foreach_slot (mpp->pg, pgp, i){
192 vector_foreach_slot (pgp->paths, pp, j){
193 if (pp->state == PATH_UNCHECKED ||
194 pp->state == PATH_WILD)
196 if ((pp->dmstate == PSTATE_FAILED ||
197 pp->dmstate == PSTATE_UNDEF) &&
198 (pp->state == PATH_UP || pp->state == PATH_GHOST))
199 dm_reinstate_path(mpp->alias, pp->dev_t);
200 else if ((pp->dmstate == PSTATE_ACTIVE ||
201 pp->dmstate == PSTATE_UNDEF) &&
202 (pp->state == PATH_DOWN ||
203 pp->state == PATH_SHAKY))
204 dm_fail_path(mpp->alias, pp->dev_t);
/* Apply sync_map_state() to every map in mpvec. */
210 sync_maps_state(vector mpvec)
213 struct multipath *mpp;
215 vector_foreach_slot (mpvec, mpp, i)
/*
 * Remove a map from device-mapper and from the daemon's state:
 * flush the DM table, orphan the map's paths, and stop its waiter
 * thread. A dm_flush_map() failure is reported but may be benign
 * (map already removed externally, or device still open).
 */
220 flush_map(struct multipath * mpp, struct vectors * vecs)
223 * clear references to this map before flushing so we can ignore
224 * the spurious uevent we may generate with the dm_flush_map call below
226 if (dm_flush_map(mpp->alias)) {
228 * May not really be an error -- if the map was already flushed
229 * from the device mapper by dmsetup(8) for instance.
231 condlog(0, "%s: can't flush", mpp->alias);
236 condlog(2, "%s: map flushed", mpp->alias);
239 orphan_paths(vecs->pathvec, mpp);
240 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Handle a dm-* "add"/"change" uevent: resolve the map alias, either
 * from DM_NAME in the uevent or by looking it up via major:minor,
 * then delegate to ev_add_map().
 */
246 uev_add_map (struct uevent * uev, struct vectors * vecs)
249 int major = -1, minor = -1, rc;
251 condlog(3, "%s: add map (uevent)", uev->kernel);
252 alias = uevent_get_dm_name(uev);
254 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
255 major = uevent_get_major(uev);
256 minor = uevent_get_minor(uev);
257 alias = dm_mapname(major, minor);
259 condlog(2, "%s: mapname not found for %d:%d",
260 uev->kernel, major, minor);
264 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * Register a multipath map named `alias` (device node `dev`) with the
 * daemon. Non-multipath DM devices are ignored. If the map is already
 * known it is left alone (optionally reassigning DM devices); if it
 * exists in DM but not in mpvec it is adopted via
 * add_map_without_path(); otherwise the map is (re)created through
 * get_refwwid()/coalesce_paths().
 * NOTE(review): the blacklist/failure branching near the end is
 * partially elided in this excerpt.
 */
270 ev_add_map (char * dev, char * alias, struct vectors * vecs)
273 struct multipath * mpp;
277 map_present = dm_map_present(alias);
279 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
280 condlog(4, "%s: not a multipath map", alias);
284 mpp = find_mp_by_alias(vecs->mpvec, alias);
288 * Not really an error -- we generate our own uevent
289 * if we create a multipath mapped device as a result
292 if (conf->reassign_maps) {
293 condlog(3, "%s: Reassign existing device-mapper devices",
299 condlog(2, "%s: adding map", alias);
302 * now we can register the map
304 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
306 condlog(2, "%s: devmap %s registered", alias, dev);
309 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
312 r = coalesce_paths(vecs, NULL, refwwid, 0);
317 condlog(2, "%s: devmap %s added", alias, dev);
319 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
321 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * Handle a dm-* "remove" uevent: look the map up by minor number,
 * sanity-check that the alias matches the registered map, then drop
 * the map from daemon state (orphan paths, stop waiter thread).
 * The DM device itself is already gone, so no flush is attempted.
 */
328 uev_remove_map (struct uevent * uev, struct vectors * vecs)
332 struct multipath *mpp;
334 condlog(2, "%s: remove map (uevent)", uev->kernel);
335 alias = uevent_get_dm_name(uev);
337 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
340 minor = uevent_get_minor(uev);
341 mpp = find_mp_by_minor(vecs->mpvec, minor);
344 condlog(2, "%s: devmap not registered, can't remove",
348 if (strcmp(mpp->alias, alias)) {
349 condlog(2, "%s: minor number mismatch (map %d, event %d)",
350 mpp->alias, mpp->dmi->minor, minor);
354 orphan_paths(vecs->pathvec, mpp);
355 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * CLI-driven map removal: find the map by minor, verify the alias
 * matches, and flush it from device-mapper via flush_map().
 * Unlike uev_remove_map(), the DM device still exists here.
 */
362 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
364 struct multipath * mpp;
366 mpp = find_mp_by_minor(vecs->mpvec, minor);
369 condlog(2, "%s: devmap not registered, can't remove",
373 if (strcmp(mpp->alias, alias)) {
374 condlog(2, "%s: minor number mismatch (map %d, event %d)",
375 mpp->alias, mpp->dmi->minor, minor);
378 return flush_map(mpp, vecs);
/*
 * Handle a path "add" uevent. Rejects relative device names
 * (".." in the kernel name). If the path is already in pathvec,
 * a path with an empty wwid is re-probed (and dropped from pathvec
 * if it turns out to be blacklisted); otherwise the path info is
 * gathered and stored, and ev_add_path() integrates it into a map.
 * NOTE(review): intermediate error branches are elided in this excerpt.
 */
382 uev_add_path (struct uevent *uev, struct vectors * vecs)
387 condlog(2, "%s: add path (uevent)", uev->kernel);
388 if (strstr(uev->kernel, "..") != NULL) {
390 * Don't allow relative device names in the pathvec
392 condlog(0, "%s: path name is invalid", uev->kernel);
396 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
398 condlog(0, "%s: spurious uevent, path already in pathvec",
402 if (!strlen(pp->wwid)) {
/* wwid was never resolved: refresh udev ref and re-run pathinfo */
403 udev_device_unref(pp->udev);
404 pp->udev = udev_device_ref(uev->udev);
405 ret = pathinfo(pp, conf->hwtable,
406 DI_ALL | DI_BLACKLIST);
408 i = find_slot(vecs->pathvec, (void *)pp);
410 vector_del_slot(vecs->pathvec, i);
413 } else if (ret == 1) {
414 condlog(0, "%s: failed to reinitialize path",
421 * get path vital state
423 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
424 uev->udev, DI_ALL, &pp);
428 condlog(0, "%s: failed to store path info",
432 pp->checkint = conf->checkint;
435 return ev_add_path(pp, vecs);
/*
 * Integrate a newly discovered path pp into multipath state.
 * Requires a resolved wwid. If a map with that wwid exists, the path
 * joins it (after a device-size consistency check) and the map is
 * reloaded; otherwise a new map is created with add_map_with_path().
 * The resulting table is pushed to device-mapper via setup_map()/
 * domap(), with a bounded retry loop to ride out racing uevents, and
 * a DM event waiter thread is started for newly created maps.
 * On failure the path is orphaned but left in pathvec.
 * NOTE(review): many branch/brace lines are elided in this excerpt.
 */
444 ev_add_path (struct path * pp, struct vectors * vecs)
446 struct multipath * mpp;
447 char empty_buff[WWID_SIZE] = {0};
448 char params[PARAMS_SIZE] = {0};
450 int start_waiter = 0;
453 * need path UID to go any further
455 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
456 condlog(0, "%s: failed to get path uid", pp->dev);
457 goto fail; /* leave path added to pathvec */
459 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* existing map: refuse paths with zero or mismatched size */
462 if ((!pp->size) || (mpp->size != pp->size)) {
464 condlog(0, "%s: failed to add new path %s, "
466 mpp->alias, pp->dev);
468 condlog(0, "%s: failed to add new path %s, "
469 "device size mismatch",
470 mpp->alias, pp->dev);
471 int i = find_slot(vecs->pathvec, (void *)pp);
473 vector_del_slot(vecs->pathvec, i);
478 condlog(4,"%s: adopting all paths for path %s",
479 mpp->alias, pp->dev);
480 if (adopt_paths(vecs->pathvec, mpp, 1))
481 goto fail; /* leave path added to pathvec */
483 verify_paths(mpp, vecs, NULL);
484 mpp->flush_on_last_del = FLUSH_UNDEF;
485 mpp->action = ACT_RELOAD;
/* no existing map: a zero-size path cannot seed a new one */
489 condlog(0, "%s: failed to create new map,"
490 " device size is 0 ", pp->dev);
491 int i = find_slot(vecs->pathvec, (void *)pp);
493 vector_del_slot(vecs->pathvec, i);
498 condlog(4,"%s: creating new map", pp->dev);
499 if ((mpp = add_map_with_path(vecs, pp, 1))) {
500 mpp->action = ACT_CREATE;
502 * We don't depend on ACT_CREATE, as domap will
503 * set it to ACT_NOTHING when complete.
508 goto fail; /* leave path added to pathvec */
511 /* persistent reservation check */
512 mpath_pr_event_handle(pp);
515 * push the map to the device-mapper
517 if (setup_map(mpp, params, PARAMS_SIZE)) {
518 condlog(0, "%s: failed to setup map for addition of new "
519 "path %s", mpp->alias, pp->dev);
523 * reload the map for the multipath mapped device
525 if (domap(mpp, params) <= 0) {
526 condlog(0, "%s: failed in domap for addition of new "
527 "path %s", mpp->alias, pp->dev);
529 * deal with asynchronous uevents :((
531 if (mpp->action == ACT_RELOAD && retries-- > 0) {
532 condlog(0, "%s: uev_add_path sleep", mpp->alias);
534 update_mpp_paths(mpp, vecs->pathvec);
537 else if (mpp->action == ACT_RELOAD)
538 condlog(0, "%s: giving up reload", mpp->alias);
545 * update our state from kernel regardless of create or reload
547 if (setup_multipath(vecs, mpp))
548 goto fail; /* if setup_multipath fails, it removes the map */
552 if ((mpp->action == ACT_CREATE ||
553 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
554 start_waiter_thread(mpp, vecs))
558 condlog(2, "%s [%s]: path added to devmap %s",
559 pp->dev, pp->dev_t, mpp->alias);
/* failure paths: tear down the half-built map, orphan the path */
566 remove_map(mpp, vecs, 1);
568 orphan_path(pp, "failed to add path");
/*
 * Handle a path "remove" uevent: look the path up by kernel name and
 * delegate to ev_remove_path(). An unknown path is not an error.
 */
573 uev_remove_path (struct uevent *uev, struct vectors * vecs)
577 condlog(2, "%s: remove path (uevent)", uev->kernel);
578 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
581 /* Not an error; path might have been purged earlier */
582 condlog(0, "%s: path already removed", uev->kernel);
586 return ev_remove_path(pp, vecs);
/*
 * Remove path pp from its map and from pathvec.
 * If it was the map's last path the whole map is flushed (first
 * disabling queueing when flush_on_last_del is enabled so the flush
 * cannot hang); otherwise the map is rebuilt without the path and
 * reloaded into device-mapper (setup_map()/domap()/setup_multipath()).
 * Orphaned paths (pp->mpp == NULL) skip all map handling.
 * NOTE(review): several branch/cleanup lines are elided in this excerpt.
 */
590 ev_remove_path (struct path *pp, struct vectors * vecs)
592 struct multipath * mpp;
594 char params[PARAMS_SIZE] = {0};
597 * avoid referring to the map of an orphaned path
599 if ((mpp = pp->mpp)) {
601 * transform the mp->pg vector of vectors of paths
602 * into a mp->params string to feed the device-mapper
604 if (update_mpp_paths(mpp, vecs->pathvec)) {
605 condlog(0, "%s: failed to update paths",
609 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
610 vector_del_slot(mpp->paths, i);
613 * remove the map IFF removing the last path
615 if (VECTOR_SIZE(mpp->paths) == 0) {
616 char alias[WWID_SIZE];
619 * flush_map will fail if the device is open
621 strncpy(alias, mpp->alias, WWID_SIZE);
622 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
623 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
625 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
626 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
627 dm_queue_if_no_path(mpp->alias, 0);
629 if (!flush_map(mpp, vecs)) {
630 condlog(2, "%s: removed map after"
631 " removing all paths",
637 * Not an error, continue
/* paths remain: rebuild and reload the map without pp */
641 if (setup_map(mpp, params, PARAMS_SIZE)) {
642 condlog(0, "%s: failed to setup map for"
643 " removal of path %s", mpp->alias, pp->dev);
649 mpp->action = ACT_RELOAD;
650 if (domap(mpp, params) <= 0) {
651 condlog(0, "%s: failed in domap for "
652 "removal of path %s",
653 mpp->alias, pp->dev);
657 * update our state from kernel
659 if (setup_multipath(vecs, mpp)) {
664 condlog(2, "%s [%s]: path removed from map %s",
665 pp->dev, pp->dev_t, mpp->alias);
670 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
671 vector_del_slot(vecs->pathvec, i);
/* failure path: drop the whole map rather than leave stale state */
678 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * Handle a path "change" uevent. Currently only reacts to a change
 * of the DISK_RO attribute: when the read-only flag changed, the
 * owning map is reloaded so the kernel table matches.
 */
683 uev_update_path (struct uevent *uev, struct vectors * vecs)
687 ro = uevent_get_disk_ro(uev);
692 condlog(2, "%s: update path write_protect to '%d' (uevent)",
694 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
696 condlog(0, "%s: spurious uevent, path not found",
701 retval = reload_map(vecs, pp->mpp, 0);
703 condlog(2, "%s: map %s reloaded (retval %d)",
704 uev->kernel, pp->mpp->alias, retval);
/*
 * Discover existing multipath maps from device-mapper into
 * vecs->mpvec and set up daemon state for each of them.
 */
713 map_discovery (struct vectors * vecs)
715 struct multipath * mpp;
718 if (dm_get_maps(vecs->mpvec))
721 vector_foreach_slot (vecs->mpvec, mpp, i)
722 if (setup_multipath(vecs, mpp))
/*
 * Callback run for each CLI command received on the unix socket.
 * Parses and executes the command under the vecs lock (with a
 * cancellation-safe cleanup handler), then normalizes the reply:
 * "fail\n" on error, "ok\n" on silent success, untouched otherwise.
 */
729 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
731 struct vectors * vecs;
736 vecs = (struct vectors *)trigger_data;
738 pthread_cleanup_push(cleanup_lock, &vecs->lock);
740 pthread_testcancel();
742 r = parse_cmd(str, reply, len, vecs);
745 *reply = STRDUP("fail\n");
746 *len = strlen(*reply) + 1;
749 else if (!r && *len == 0) {
750 *reply = STRDUP("ok\n");
751 *len = strlen(*reply) + 1;
754 /* else if (r < 0) leave *reply alone */
756 lock_cleanup_pop(vecs->lock);
/*
 * Return non-zero for uevents the daemon should ignore: anything
 * without "/block/" in the devpath, and partitions (an extra path
 * component after the block device name).
 */
761 uev_discard(char * devpath)
767 * keep only block devices, discard partitions
769 tmp = strstr(devpath, "/block/");
771 condlog(4, "no /block/ in '%s'", devpath);
774 if (sscanf(tmp, "/block/%10s", a) != 1 ||
775 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
776 condlog(4, "discard event on %s", devpath);
/*
 * Central uevent dispatcher, run under the vecs lock.
 * dm-* devices: "change" -> uev_add_map, "remove" -> uev_remove_map
 * (dm "add" events are ignored: tables are not initialised yet).
 * Other block devices: after devnode blacklist filtering,
 * "add"/"remove"/"change" map to the corresponding path handlers.
 */
783 uev_trigger (struct uevent * uev, void * trigger_data)
786 struct vectors * vecs;
788 vecs = (struct vectors *)trigger_data;
790 if (uev_discard(uev->devpath))
793 pthread_cleanup_push(cleanup_lock, &vecs->lock);
795 pthread_testcancel();
799 * Add events are ignored here as the tables
800 * are not fully initialised then.
802 if (!strncmp(uev->kernel, "dm-", 3)) {
803 if (!strncmp(uev->action, "change", 6)) {
804 r = uev_add_map(uev, vecs);
807 if (!strncmp(uev->action, "remove", 6)) {
808 r = uev_remove_map(uev, vecs);
815 * path add/remove event
817 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
821 if (!strncmp(uev->action, "add", 3)) {
822 r = uev_add_path(uev, vecs);
825 if (!strncmp(uev->action, "remove", 6)) {
826 r = uev_remove_path(uev, vecs);
829 if (!strncmp(uev->action, "change", 6)) {
830 r = uev_update_path(uev, vecs);
835 lock_cleanup_pop(vecs->lock);
/*
 * Thread entry: run the netlink/udev uevent listener until it exits.
 */
840 ueventloop (void * ap)
842 if (uevent_listen(udev))
843 condlog(0, "error starting uevent listener");
/*
 * NOTE(review): the following two lines belong to the uevent
 * dispatcher thread entry (uevqloop); its definition line is elided
 * in this excerpt. It drains queued uevents through uev_trigger().
 */
851 if (uevent_dispatch(&uev_trigger, ap))
852 condlog(0, "error starting uevent dispatcher");
/*
 * Thread entry for the CLI listener: register every interactive
 * command handler with the key parser, then serve the unix socket,
 * routing each received command through uxsock_trigger().
 */
857 uxlsnrloop (void * ap)
862 set_handler_callback(LIST+PATHS, cli_list_paths);
863 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
864 set_handler_callback(LIST+MAPS, cli_list_maps);
865 set_handler_callback(LIST+STATUS, cli_list_status);
866 set_handler_callback(LIST+DAEMON, cli_list_daemon);
867 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
868 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
869 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
870 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
871 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
872 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
873 set_handler_callback(LIST+CONFIG, cli_list_config);
874 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
875 set_handler_callback(LIST+DEVICES, cli_list_devices);
876 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
877 set_handler_callback(ADD+PATH, cli_add_path);
878 set_handler_callback(DEL+PATH, cli_del_path);
879 set_handler_callback(ADD+MAP, cli_add_map);
880 set_handler_callback(DEL+MAP, cli_del_map);
881 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
882 set_handler_callback(RECONFIGURE, cli_reconfigure);
883 set_handler_callback(SUSPEND+MAP, cli_suspend);
884 set_handler_callback(RESUME+MAP, cli_resume);
885 set_handler_callback(RESIZE+MAP, cli_resize);
886 set_handler_callback(RELOAD+MAP, cli_reload);
887 set_handler_callback(RESET+MAP, cli_reassign);
888 set_handler_callback(REINSTATE+PATH, cli_reinstate);
889 set_handler_callback(FAIL+PATH, cli_fail);
890 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
891 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
892 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
893 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
894 set_handler_callback(QUIT, cli_quit);
895 set_handler_callback(SHUTDOWN, cli_shutdown);
896 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
897 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
898 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
899 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
900 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
903 uxsock_listen(&uxsock_trigger, ap);
/*
 * NOTE(review): fragment of a function whose definition line is
 * elided in this excerpt — presumably a helper that maps
 * running_state to per-state behavior (e.g. daemon status
 * reporting); confirm against the full source.
 */
917 switch (running_state) {
922 case DAEMON_CONFIGURE:
926 case DAEMON_SHUTDOWN:
/*
 * Mark path pp failed in device-mapper and update the owning map's
 * queueing mode bookkeeping for the lost path.
 */
933 fail_path (struct path * pp, int del_active)
938 condlog(2, "checker failed path %s in map %s",
939 pp->dev_t, pp->mpp->alias);
941 dm_fail_path(pp->mpp->alias, pp->dev_t);
943 update_queue_mode_del_path(pp->mpp);
947 * caller must have locked the path list before calling that function
/*
 * Reinstate path pp in device-mapper and update the owning map's
 * queueing mode bookkeeping for the recovered path.
 */
950 reinstate_path (struct path * pp, int add_active)
955 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
956 condlog(0, "%s: reinstate failed", pp->dev_t);
958 condlog(2, "%s: reinstated", pp->dev_t);
960 update_queue_mode_add_path(pp->mpp);
/*
 * Re-enable the path group containing pp if it is currently
 * disabled. Safe no-op when pg/pgindex are not yet populated:
 * the next map reload re-enables all groups anyway.
 */
965 enable_group(struct path * pp)
967 struct pathgroup * pgp;
970 * if path is added through uev_add_path, pgindex can be unset.
971 * next update_strings() will set it, upon map reload event.
973 * we can safely return here, because upon map reload, all
974 * PG will be enabled.
976 if (!pp->mpp->pg || !pp->pgindex)
979 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
981 if (pgp->status == PGSTATE_DISABLED) {
982 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
983 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * Drop registered maps whose DM device no longer exists
 * (removed behind the daemon's back).
 */
988 mpvec_garbage_collector (struct vectors * vecs)
990 struct multipath * mpp;
996 vector_foreach_slot (vecs->mpvec, mpp, i) {
997 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
998 condlog(2, "%s: remove dead map", mpp->alias);
999 remove_map_and_stop_waiter(mpp, vecs, 1);
1005 /* This is called after a path has started working again. It the multipath
1006 * device for this path uses the followover failback type, and this is the
1007 * best pathgroup, and this is the first path in the pathgroup to come back
1008 * up, then switch to this pathgroup */
1010 followover_should_failback(struct path * pp)
1012 struct pathgroup * pgp;
1016 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1017 !pp->mpp->pg || !pp->pgindex ||
1018 pp->pgindex != pp->mpp->bestpg)
1021 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
/* any other usable path in this group means pp was not the first back */
1022 vector_foreach_slot(pgp->paths, pp1, i) {
1025 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * Per-tick countdown for deferred failback: decrement each map's
 * failback_tick and, when it reaches zero and a better path group
 * exists, perform the switch.
 */
1032 defered_failback_tick (vector mpvec)
1034 struct multipath * mpp;
1037 vector_foreach_slot (mpvec, mpp, i) {
1039 * deferred failback getting sooner
1041 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1042 mpp->failback_tick--;
1044 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1045 switch_pathgroup(mpp);
/*
 * Per-tick countdown for no_path_retry queueing: while a map is
 * retrying, account queueing time; when the retry window expires,
 * disable queue_if_no_path so pending I/O fails instead of hanging.
 */
1051 retry_count_tick(vector mpvec)
1053 struct multipath *mpp;
1056 vector_foreach_slot (mpvec, mpp, i) {
1057 if (mpp->retry_tick) {
1058 mpp->stat_total_queueing_time++;
1059 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1060 if(--mpp->retry_tick == 0) {
1061 dm_queue_if_no_path(mpp->alias, 0);
1062 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Refresh path priorities and report whether any changed.
 * With refresh_all set, every path in every group of pp's map is
 * re-prioritized; otherwise only pp itself is refreshed.
 * Returns non-zero when a priority value changed.
 * NOTE(review): the refresh_all gate and return lines are elided
 * in this excerpt.
 */
1068 int update_prio(struct path *pp, int refresh_all)
1072 struct pathgroup * pgp;
1073 int i, j, changed = 0;
1076 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1077 vector_foreach_slot (pgp->paths, pp1, j) {
1078 oldpriority = pp1->priority;
1079 pathinfo(pp1, conf->hwtable, DI_PRIO);
1080 if (pp1->priority != oldpriority)
1086 oldpriority = pp->priority;
1087 pathinfo(pp, conf->hwtable, DI_PRIO);
1089 if (pp->priority == oldpriority)
/*
 * Reload mpp's DM table (regrouping paths), refresh daemon state
 * from the kernel, and resync per-path DM states.
 */
1094 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1096 if (reload_map(vecs, mpp, refresh))
1100 if (setup_multipath(vecs, mpp) != 0)
1102 sync_map_state(mpp);
/*
 * Run one checker iteration for path pp (called from checkerloop
 * under the vecs lock). Flow:
 *   - honor the per-path tick countdown (rate limiting);
 *   - get the new state (path_offline(), then the active checker);
 *   - on state change: fail or reinstate the path in DM, reset the
 *     check interval, and track new_path_up/chkr_new_path_up;
 *   - on stable PATH_UP/GHOST: exponentially back off the check
 *     interval up to conf->max_checkint;
 *   - finally refresh priorities and trigger regrouping or
 *     (possibly deferred) path group failback as configured.
 * Returns whether the path was actually checked this tick.
 * NOTE(review): numerous branch/brace lines are elided in this
 * excerpt; the summary above follows the visible code.
 */
1108 check_path (struct vectors * vecs, struct path * pp)
1111 int new_path_up = 0;
1112 int chkr_new_path_up = 0;
1113 int oldchkrstate = pp->chkrstate;
1118 if (pp->tick && --pp->tick)
1119 return 0; /* don't check this path yet */
1122 * provision a next check soonest,
1123 * in case we exit abnormally from here
1125 pp->tick = conf->checkint;
1127 newstate = path_offline(pp);
1128 if (newstate == PATH_UP)
1129 newstate = get_state(pp, 1);
1131 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1132 condlog(2, "%s: unusable path", pp->dev);
1133 pathinfo(pp, conf->hwtable, 0);
1137 * Async IO in flight. Keep the previous path state
1138 * and reschedule as soon as possible
1140 if (newstate == PATH_PENDING) {
1145 * Synchronize with kernel state
1147 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1148 condlog(1, "%s: Could not synchronize with kernel state",
1150 pp->dmstate = PSTATE_UNDEF;
1152 pp->chkrstate = newstate;
1153 if (newstate != pp->state) {
1154 int oldstate = pp->state;
1155 pp->state = newstate;
1156 LOG_MSG(1, checker_message(&pp->checker));
1159 * upon state change, reset the checkint
1160 * to the shortest delay
1162 pp->checkint = conf->checkint;
1164 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1166 * proactively fail path in the DM
1168 if (oldstate == PATH_UP ||
1169 oldstate == PATH_GHOST)
1175 * cancel scheduled failback
1177 pp->mpp->failback_tick = 0;
1179 pp->mpp->stat_path_failures++;
1183 if(newstate == PATH_UP || newstate == PATH_GHOST){
1184 if ( pp->mpp && pp->mpp->prflag ){
1186 * Check Persistent Reservation.
1188 condlog(2, "%s: checking persistent reservation "
1189 "registration", pp->dev);
1190 mpath_pr_event_handle(pp);
1195 * reinstate this path
1197 if (oldstate != PATH_UP &&
1198 oldstate != PATH_GHOST)
1199 reinstate_path(pp, 1);
1201 reinstate_path(pp, 0);
1205 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1206 chkr_new_path_up = 1;
1209 * if at least one path is up in a group, and
1210 * the group is disabled, re-enable it
1212 if (newstate == PATH_UP)
1215 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1216 if (pp->dmstate == PSTATE_FAILED ||
1217 pp->dmstate == PSTATE_UNDEF) {
1218 /* Clear IO errors */
1219 reinstate_path(pp, 0);
1221 LOG_MSG(4, checker_message(&pp->checker));
1222 if (pp->checkint != conf->max_checkint) {
1224 * double the next check delay.
1225 * max at conf->max_checkint
1227 if (pp->checkint < (conf->max_checkint / 2))
1228 pp->checkint = 2 * pp->checkint;
1230 pp->checkint = conf->max_checkint;
1232 condlog(4, "%s: delay next check %is",
1233 pp->dev_t, pp->checkint);
1235 pp->tick = pp->checkint;
1238 else if (newstate == PATH_DOWN) {
1239 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1240 LOG_MSG(3, checker_message(&pp->checker));
1242 LOG_MSG(2, checker_message(&pp->checker));
1245 pp->state = newstate;
1248 * path prio refreshing
1250 condlog(4, "path prio refresh");
1252 if (update_prio(pp, new_path_up) &&
1253 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1254 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1255 update_path_groups(pp->mpp, vecs, !new_path_up);
1256 else if (need_switch_pathgroup(pp->mpp, 0)) {
1257 if (pp->mpp->pgfailback > 0 &&
1258 (new_path_up || pp->mpp->failback_tick <= 0))
1259 pp->mpp->failback_tick =
1260 pp->mpp->pgfailback + 1;
1261 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1262 (chkr_new_path_up && followover_should_failback(pp)))
1263 switch_pathgroup(pp->mpp);
/*
 * Thread entry for the path checker loop. Locks memory, seeds each
 * path's check interval, then repeatedly (under the vecs lock):
 * checks every path, runs the deferred-failback and retry-count
 * tickers, garbage-collects dead maps, and logs the per-iteration
 * timing measured with gettimeofday()/timersub().
 * NOTE(review): the outer loop construct and sleep are elided in
 * this excerpt.
 */
1269 checkerloop (void *ap)
1271 struct vectors *vecs;
1276 mlockall(MCL_CURRENT | MCL_FUTURE);
1277 vecs = (struct vectors *)ap;
1278 condlog(2, "path checkers start up");
1281 * init the path check interval
1283 vector_foreach_slot (vecs->pathvec, pp, i) {
1284 pp->checkint = conf->checkint;
1288 struct timeval diff_time, start_time, end_time;
1291 if (gettimeofday(&start_time, NULL) != 0)
1292 start_time.tv_sec = 0;
1293 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1295 pthread_testcancel();
1298 if (vecs->pathvec) {
1299 vector_foreach_slot (vecs->pathvec, pp, i) {
1300 num_paths += check_path(vecs, pp);
1304 defered_failback_tick(vecs->mpvec);
1305 retry_count_tick(vecs->mpvec);
1310 condlog(4, "map garbage collection");
1311 mpvec_garbage_collector(vecs);
1315 lock_cleanup_pop(vecs->lock);
1316 if (start_time.tv_sec &&
1317 gettimeofday(&end_time, NULL) == 0 &&
1319 timersub(&end_time, &start_time, &diff_time);
1320 condlog(3, "checked %d path%s in %lu.%06lu secs",
1321 num_paths, num_paths > 1 ? "s" : "",
1322 diff_time.tv_sec, diff_time.tv_usec);
/*
 * (Re)build the daemon's full view of the storage topology:
 * discover paths from sysfs and maps from DM, filter blacklisted
 * paths, coalesce paths into a new map vector, reconcile it against
 * the old one (coalesce_maps), sync DM path states, remember wwids,
 * swap in the new map vector, and set up (optionally with waiter
 * threads) every resulting map.
 */
1330 configure (struct vectors * vecs, int start_waiters)
1332 struct multipath * mpp;
1337 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1340 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1343 if (!(mpvec = vector_alloc()))
1347 * probe for current path (from sysfs) and map (from dm) sets
1349 path_discovery(vecs->pathvec, conf, DI_ALL);
1351 vector_foreach_slot (vecs->pathvec, pp, i){
1352 if (filter_path(conf, pp) > 0){
1353 vector_del_slot(vecs->pathvec, i);
1358 pp->checkint = conf->checkint;
1360 if (map_discovery(vecs))
1364 * create new set of maps & push changed ones into dm
1366 if (coalesce_paths(vecs, mpvec, NULL, 1))
1370 * may need to remove some maps which are no longer relevant
1371 * e.g., due to blacklist changes in conf file
1373 if (coalesce_maps(vecs, mpvec))
1378 sync_maps_state(mpvec);
1379 vector_foreach_slot(mpvec, mpp, i){
1380 remember_wwid(mpp->wwid);
1385 * purge dm of old maps
1390 * save new set of maps formed by considering current path state
1392 vector_free(vecs->mpvec);
1393 vecs->mpvec = mpvec;
1396 * start dm event waiter threads for these new maps
1398 vector_foreach_slot(vecs->mpvec, mpp, i) {
1399 if (setup_multipath(vecs, mpp))
1402 if (start_waiter_thread(mpp, vecs))
/*
 * Full runtime reconfiguration: tear down all maps and paths built
 * under the old config, reload DEFAULT_CONFIGFILE (preserving the
 * previous verbosity), then rebuild state via configure().
 * NOTE(review): the tail (configure call, freeing the old config)
 * is elided in this excerpt.
 */
1409 reconfigure (struct vectors * vecs)
1411 struct config * old = conf;
1415 * free old map and path vectors ... they use old conf state
1417 if (VECTOR_SIZE(vecs->mpvec))
1418 remove_maps_and_stop_waiters(vecs);
1420 if (VECTOR_SIZE(vecs->pathvec))
1421 free_pathvec(vecs->pathvec, FREE_PATHS);
1423 vecs->pathvec = NULL;
1426 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1427 conf->verbosity = old->verbosity;
/*
 * Allocate and initialize the global vectors container and its
 * depth-counting mutex lock. Returns NULL on allocation failure.
 */
1437 static struct vectors *
1440 struct vectors * vecs;
1442 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1448 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1450 if (!vecs->lock.mutex)
1453 pthread_mutex_init(vecs->lock.mutex, NULL);
1454 vecs->lock.depth = 0;
/* NOTE(review): this log line appears to belong to an elided failure path */
1460 condlog(0, "failed to init paths");
/*
 * sigaction()-based replacement for signal(): install func as the
 * handler for signo and return the previous handler (old sigaction's
 * sa_handler).
 */
1465 signal_set(int signo, void (*func) (int))
1468 struct sigaction sig;
1469 struct sigaction osig;
1471 sig.sa_handler = func;
1472 sigemptyset(&sig.sa_mask);
1475 r = sigaction(signo, &sig, &osig);
1480 return (osig.sa_handler);
/*
 * Process signal-set flags from the main loop context:
 * a pending reconfigure request (only while DAEMON_RUNNING) runs
 * under the gvecs lock; a pending log-reset request resets the
 * logging subsystem under logq_lock.
 */
1484 handle_signals(void)
1486 if (reconfig_sig && running_state == DAEMON_RUNNING) {
1487 condlog(2, "reconfigure (signal)");
1488 pthread_cleanup_push(cleanup_lock,
1491 pthread_testcancel();
1493 lock_cleanup_pop(gvecs->lock);
1495 if (log_reset_sig) {
1496 condlog(2, "reset log (signal)");
1497 pthread_mutex_lock(&logq_lock);
1498 log_reset("multipathd");
1499 pthread_mutex_unlock(&logq_lock);
/* NOTE(review): line below belongs to the elided SIGUSR2 handler */
1526 condlog(3, "SIGUSR2 received");
/*
 * Signal setup (enclosing function header elided in this excerpt):
 * block HUP/USR1/USR2 in this thread's mask, install handlers for
 * HUP (reconfigure), USR1 (log reset), USR2, INT/TERM (shutdown),
 * and ignore SIGPIPE (broken CLI sockets must not kill the daemon).
 */
1535 sigaddset(&set, SIGHUP);
1536 sigaddset(&set, SIGUSR1);
1537 sigaddset(&set, SIGUSR2);
1538 pthread_sigmask(SIG_BLOCK, &set, NULL);
1540 signal_set(SIGHUP, sighup);
1541 signal_set(SIGUSR1, sigusr1);
1542 signal_set(SIGUSR2, sigusr2);
1543 signal_set(SIGINT, sigend);
1544 signal_set(SIGTERM, sigend);
1545 signal(SIGPIPE, SIG_IGN);
/*
 * Scheduler setup (enclosing function header elided in this
 * excerpt): request real-time round-robin scheduling at maximum
 * priority; failure is logged but non-fatal.
 */
1552 static struct sched_param sched_param = {
1553 .sched_priority = 99
1556 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1559 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * Make the daemon immune to the OOM killer by writing the minimum
 * score to /proc/self/oom_score_adj (modern kernels), falling back
 * to /proc/self/oom_adj on older ones. Skipped when systemd already
 * provides OOMScoreAdjust via the environment.
 */
#ifdef OOM_SCORE_ADJ_MIN
1568 char *file = "/proc/self/oom_score_adj";
1569 int score = OOM_SCORE_ADJ_MIN;
1572 char *file = "/proc/self/oom_adj";
1573 int score = OOM_ADJUST_MIN;
1579 envp = getenv("OOMScoreAdjust");
1581 condlog(3, "Using systemd provided OOMScoreAdjust");
1585 if (stat(file, &st) == 0){
1586 fp = fopen(file, "w");
1588 condlog(0, "couldn't fopen %s : %s", file,
1592 fprintf(fp, "%i", score);
1596 if (errno != ENOENT) {
1597 condlog(0, "couldn't stat %s : %s", file,
1601 #ifdef OOM_ADJUST_MIN
1602 file = "/proc/self/oom_adj";
1603 score = OOM_ADJUST_MIN;
1608 condlog(0, "couldn't adjust oom score");
/*
 * Main daemon body (runs in the forked child, or directly in
 * foreground mode). Sequence:
 *   - lock memory, init exit semaphore, thread attributes, logging;
 *   - load config, init checkers/prioritizers, apply fd limits
 *     (honoring systemd's LimitNOFILE if provided);
 *   - init global vecs, start the uevent listener and CLI listener
 *     threads early, then run initial configure();
 *   - start the checker loop and uevent dispatcher threads, write
 *     the pidfile, notify systemd READY, and block on exit_sem;
 *   - on shutdown: disable queueing where configured, remove maps,
 *     cancel all service threads, free path/lock state, unlink the
 *     pidfile, and notify systemd of the exit status.
 * NOTE(review): many error-handling and cleanup lines are elided in
 * this excerpt; the outline above follows the visible code.
 */
1612 child (void * param)
1614 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1615 pthread_attr_t log_attr, misc_attr, uevent_attr;
1616 struct vectors * vecs;
1617 struct multipath * mpp;
1622 mlockall(MCL_CURRENT | MCL_FUTURE);
1623 sem_init(&exit_sem, 0, 0);
1628 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1629 setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1630 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1633 setup_thread_attr(&log_attr, 64 * 1024, 0);
1634 log_thread_start(&log_attr);
1635 pthread_attr_destroy(&log_attr);
1638 running_state = DAEMON_START;
1641 sd_notify(0, "STATUS=startup");
1643 condlog(2, "--------start up--------");
1644 condlog(2, "read " DEFAULT_CONFIGFILE);
1646 if (load_config(DEFAULT_CONFIGFILE, udev))
1649 if (init_checkers()) {
1650 condlog(0, "failed to initialize checkers");
1654 condlog(0, "failed to initialize prioritizers");
1658 setlogmask(LOG_UPTO(conf->verbosity + 3));
1660 envp = getenv("LimitNOFILE");
1663 condlog(2,"Using systemd provided open fds limit of %s", envp);
1664 } else if (conf->max_fds) {
1665 struct rlimit fd_limit;
1667 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1668 condlog(0, "can't get open fds limit: %s",
1670 fd_limit.rlim_cur = 0;
1671 fd_limit.rlim_max = 0;
1673 if (fd_limit.rlim_cur < conf->max_fds) {
1674 fd_limit.rlim_cur = conf->max_fds;
1675 if (fd_limit.rlim_max < conf->max_fds)
1676 fd_limit.rlim_max = conf->max_fds;
1677 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1678 condlog(0, "can't set open fds limit to "
1680 fd_limit.rlim_cur, fd_limit.rlim_max,
1683 condlog(3, "set open fds limit to %lu/%lu",
1684 fd_limit.rlim_cur, fd_limit.rlim_max);
1690 vecs = gvecs = init_vecs();
1698 udev_set_sync_support(0);
1700 * Start uevent listener early to catch events
1702 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1703 condlog(0, "failed to create uevent thread: %d", rc);
1706 pthread_attr_destroy(&uevent_attr);
1707 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1708 condlog(0, "failed to create cli listener: %d", rc);
1712 * fetch and configure both paths and multipaths
1715 sd_notify(0, "STATUS=configure");
1717 running_state = DAEMON_CONFIGURE;
1720 if (configure(vecs, 1)) {
1722 condlog(0, "failure during configuration");
1730 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1731 condlog(0,"failed to create checker loop thread: %d", rc);
1734 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1735 condlog(0, "failed to create uevent dispatcher: %d", rc);
1738 pthread_attr_destroy(&misc_attr);
1740 /* Startup complete, create logfile */
1741 pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1742 /* Ignore errors, we can live without */
1744 running_state = DAEMON_RUNNING;
1746 sd_notify(0, "READY=1\nSTATUS=running");
/* park here until sigend()/cli_shutdown posts exit_sem */
1752 while(sem_wait(&exit_sem) != 0); /* Do nothing */
1755 sd_notify(0, "STATUS=shutdown");
1757 running_state = DAEMON_SHUTDOWN;
1759 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1760 vector_foreach_slot(vecs->mpvec, mpp, i)
1761 dm_queue_if_no_path(mpp->alias, 0);
1762 remove_maps_and_stop_waiters(vecs);
1765 pthread_cancel(check_thr);
1766 pthread_cancel(uevent_thr);
1767 pthread_cancel(uxlsnr_thr);
1768 pthread_cancel(uevq_thr);
1771 free_pathvec(vecs->pathvec, FREE_PATHS);
1772 vecs->pathvec = NULL;
1774 /* Now all the waitevent threads will start rushing in. */
1775 while (vecs->lock.depth > 0) {
1776 sleep (1); /* This is weak. */
1777 condlog(3, "Have %d wait event checkers threads to de-alloc,"
1778 " waiting...", vecs->lock.depth);
1780 pthread_mutex_destroy(vecs->lock.mutex);
1781 FREE(vecs->lock.mutex);
1782 vecs->lock.depth = 0;
1783 vecs->lock.mutex = NULL;
1793 /* We're done here */
1795 condlog(3, "unlink pidfile");
1796 unlink(DEFAULT_PIDFILE);
1799 condlog(2, "--------shut down-------");
1805 * Freeing config must be done after condlog() and dm_lib_exit(),
1806 * because logging functions like dlog() and dm_write_log()
1807 * reference the config.
1814 dbg_free_final(NULL);
1818 sd_notify(0, "ERRNO=0");
1824 sd_notify(0, "ERRNO=1");
/*
 * NOTE(review): fragmentary excerpt of the daemonize path — the function
 * header and several lines are elided.  Visible code performs the classic
 * double-fork, chdirs to '/', and redirects the three standard streams
 * to /dev/null.
 */
1835 if( (pid = fork()) < 0){
1836 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
/* second fork: ensure the daemon can never reacquire a controlling tty */
1844 if ( (pid = fork()) < 0)
1845 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
/* chdir to '/' so the daemon does not pin any mounted filesystem;
 * failure is non-fatal (continue with a warning) */
1850 fprintf(stderr, "cannot chdir to '/', continuing\n");
1852 dev_null_fd = open("/dev/null", O_RDWR);
1853 if (dev_null_fd < 0){
1854 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* redirect stdin/stdout/stderr to /dev/null; dup() reuses the lowest
 * free fd, which is the one just closed */
1859 close(STDIN_FILENO);
1860 if (dup(dev_null_fd) < 0) {
1861 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1865 close(STDOUT_FILENO);
1866 if (dup(dev_null_fd) < 0) {
1867 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1871 close(STDERR_FILENO);
1872 if (dup(dev_null_fd) < 0) {
1873 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
/* record the daemon's pid for the pidfile written later in child() */
1878 daemon_pid = getpid();
/*
 * NOTE(review): fragmentary excerpt of main() — return type line, most of
 * the getopt switch, and the closing brace are elided.  Visible flow:
 * require root, set umask, allocate config, parse options, optionally
 * assemble remaining argv into a single CLI command string, then hand
 * off to child().
 */
1883 main (int argc, char *argv[])
1885 extern char *optarg;
1891 running_state = DAEMON_INIT;
/* the daemon manipulates device-mapper state; refuse to run unprivileged */
1894 if (getuid() != 0) {
1895 fprintf(stderr, "need to be root\n");
1899 /* make sure we don't lock any path */
1901 fprintf(stderr, "can't chdir to root directory : %s\n",
/* read the current umask (first call) and force group/other write bits off */
1903 umask(umask(077) | 022);
1905 conf = alloc_config();
1910 while ((arg = getopt(argc, argv, ":dsv:k::")) != EOF ) {
1914 //debug=1; /* ### comment me out ### */
/* NOTE(review): sizeof(optarg) is the size of a char pointer, so the
 * first half of this test is constant — looks like strlen() was meant;
 * verify against upstream before relying on this validation */
1917 if (sizeof(optarg) > sizeof(char *) ||
1918 !isdigit(optarg[0]))
1921 conf->verbosity = atoi(optarg);
/* leftover argv words become one interactive CLI command; arguments
 * containing spaces are wrapped in double quotes */
1933 if (optind < argc) {
1938 while (optind < argc) {
1939 if (strchr(argv[optind], ' '))
1940 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1942 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1945 c += snprintf(c, s + CMDSIZE - c, "\n");
/* normal daemon start: run the child (daemon body) with no CLI command */
1963 return (child(NULL));
/*
 * NOTE(review): fragmentary excerpt — several lines (including the
 * assignment of mpp and the function epilogue) are elided.
 *
 * Thread entry point for persistent-reservation path events.  Visible
 * flow: read the registered keys from the path (PRIN READ KEYS), check
 * whether the multipath's reservation_key is among them, and if so
 * re-register it on this path via PROUT REGISTER-AND-IGNORE.
 */
1966 void * mpath_pr_event_handler_fn (void * pathp )
1968 struct multipath * mpp;
1969 int i,j, ret, isFound;
1970 struct path * pp = (struct path *)pathp;
1971 unsigned char *keyp;
1973 struct prout_param_descriptor *param;
1974 struct prin_resp *resp;
1978 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1980 condlog(0,"%s Alloc failed for prin response", pp->dev);
/* PERSISTENT RESERVE IN / READ KEYS on this path */
1984 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1985 if (ret != MPATH_PR_SUCCESS )
1987 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1991 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
1992 resp->prin_descriptor.prin_readkeys.additional_length );
/* additional_length == 0 means no keys registered on the device */
1994 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1996 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1997 ret = MPATH_PR_SUCCESS;
/* NOTE(review): mpp is dereferenced here but its assignment (presumably
 * mpp = pp->mpp) is in an elided line — confirm against the full source.
 * The 8 bytes of reservation_key are folded into prkey for logging. */
2001 keyp = (unsigned char *)mpp->reservation_key;
2002 for (j = 0; j < 8; ++j) {
2008 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* scan the returned key list (8 bytes per key) for our reservation key */
2011 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2013 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
2014 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2015 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2017 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
/* key not found: this host is not registered (or not authorised) */
2024 condlog(0, "%s: Either device not registered or ", pp->dev);
2025 condlog(0, "host is not authorised for registration. Skip path");
2026 ret = MPATH_PR_OTHER;
/* NOTE(review): malloc result is used unchecked in the visible lines —
 * verify a NULL check exists in the elided code */
2030 param= malloc(sizeof(struct prout_param_descriptor));
2031 memset(param, 0 , sizeof(struct prout_param_descriptor));
/* serialize prkey big-endian into the service-action key field */
2033 for (j = 7; j >= 0; --j) {
2034 param->sa_key[j] = (prkey & 0xff);
2037 param->num_transportid = 0;
2039 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
/* PERSISTENT RESERVE OUT / REGISTER AND IGNORE EXISTING KEY */
2041 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2042 if (ret != MPATH_PR_SUCCESS )
2044 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
/*
 * NOTE(review): head fragment — the function continues past the end of
 * this excerpt.  Spawns mpath_pr_event_handler_fn for a path and waits
 * for it, making the PR registration effectively synchronous.  A no-op
 * when the map has no reservation_key configured.
 */
2054 int mpath_pr_event_handle(struct path *pp)
2058 pthread_attr_t attr;
2059 struct multipath * mpp;
2063 if (!mpp->reservation_key)
2066 pthread_attr_init(&attr);
2067 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
/* NOTE(review): attr is initialized and later destroyed, but
 * pthread_create is called with a NULL attr (default attributes are
 * already joinable) — attr appears unused; confirm intent upstream */
2069 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2071 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2074 pthread_attr_destroy(&attr);
/* block until the handler thread completes */
2075 rc = pthread_join(thread, NULL);