2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
21 #include <systemd/sd-daemon.h>
23 #include <semaphore.h>
24 #include <mpath_persist.h>
42 #include <blacklist.h>
43 #include <structs_vec.h>
45 #include <devmapper.h>
48 #include <discovery.h>
52 #include <switchgroup.h>
54 #include <configure.h>
56 #include <pgpolicies.h>
65 #include "cli_handlers.h"
/*
 * File-scope definitions for the multipath daemon: logging macro,
 * uevent parameter struct, daemon run-state and signal-handler globals.
 *
 * NOTE(review): this extract is line-sampled -- each line is prefixed
 * with its original line number and intermediate lines are missing, so
 * the fragments below are not complete C on their own.
 */
70 #define FILE_NAME_SIZE 256
/* LOG_MSG(a, b): log checker message b for path pp at verbosity a;
 * relies on pp being in scope at the expansion site. */
73 #define LOG_MSG(a, b) \
76 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
78 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
81 struct mpath_event_param
84 struct multipath *mpp;
87 unsigned int mpath_mx_alloc_len;
/* current lifecycle state of the daemon (START/CONFIGURE/RUNNING/SHUTDOWN) */
90 enum daemon_status running_state;
/* posted by signal/exit paths; child() blocks on it until shutdown */
93 static sem_t exit_sem;
95 * global copy of vecs for use in sig handlers
97 struct vectors * gvecs;
/*
 * need_switch_pathgroup(): decide whether mpp should fail back to a
 * different path group. When refresh is set, re-reads every path's
 * priority (DI_PRIO) before asking select_path_group() for the best
 * group; returns truthy when bestpg differs from nextpg.
 * Manual-failback maps (-FAILBACK_MANUAL) are never switched here.
 * NOTE(review): extract is line-sampled; return statements and braces
 * are missing from this view.
 */
102 need_switch_pathgroup (struct multipath * mpp, int refresh)
104 struct pathgroup * pgp;
108 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
112 * Refresh path priority values
115 vector_foreach_slot (mpp->pg, pgp, i)
116 vector_foreach_slot (pgp->paths, pp, j)
117 pathinfo(pp, conf->hwtable, DI_PRIO);
119 mpp->bestpg = select_path_group(mpp);
121 if (mpp->bestpg != mpp->nextpg)
/*
 * switch_pathgroup(): tell the device-mapper to activate mpp->bestpg
 * and bump the switch-group statistics counter.
 */
128 switch_pathgroup (struct multipath * mpp)
130 mpp->stat_switchgroup++;
131 dm_switchgroup(mpp->alias, mpp->bestpg);
132 condlog(2, "%s: switch to path group #%i",
133 mpp->alias, mpp->bestpg);
/*
 * coalesce_maps(): reconcile the old map vector (vecs->mpvec) with the
 * newly-built one (nmpv). Maps absent from nmpv are flushed from the
 * device-mapper; if the flush fails (e.g. device open -- see comment
 * below) the map is re-set-up and carried over into nmpv instead.
 * Surviving maps may get dm_reassign() when reassign_maps is enabled.
 * NOTE(review): extract is line-sampled; braces/returns are missing.
 */
137 coalesce_maps(struct vectors *vecs, vector nmpv)
139 struct multipath * ompp;
140 vector ompv = vecs->mpvec;
143 vector_foreach_slot (ompv, ompp, i) {
144 condlog(3, "%s: coalesce map", ompp->alias);
145 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
147 * remove all current maps not allowed by the
148 * current configuration
150 if (dm_flush_map(ompp->alias)) {
151 condlog(0, "%s: unable to flush devmap",
154 * may be just because the device is open
156 if (setup_multipath(vecs, ompp) != 0) {
160 if (!vector_alloc_slot(nmpv))
/* keep the unflushable map: move it from the old vector to the new */
163 vector_set_slot(nmpv, ompp);
165 vector_del_slot(ompv, i);
170 condlog(2, "%s devmap removed", ompp->alias);
172 } else if (conf->reassign_maps) {
173 condlog(3, "%s: Reassign existing device-mapper"
174 " devices", ompp->alias);
175 dm_reassign(ompp->alias);
/*
 * sync_map_state(): push the checker's view of each path's state into
 * the kernel: reinstate paths the checker sees UP/GHOST but dm has
 * FAILED/UNDEF, and fail paths the checker sees DOWN/SHAKY but dm has
 * ACTIVE/UNDEF. UNCHECKED/WILD paths are skipped.
 */
182 sync_map_state(struct multipath *mpp)
184 struct pathgroup *pgp;
191 vector_foreach_slot (mpp->pg, pgp, i){
192 vector_foreach_slot (pgp->paths, pp, j){
193 if (pp->state == PATH_UNCHECKED ||
194 pp->state == PATH_WILD)
196 if ((pp->dmstate == PSTATE_FAILED ||
197 pp->dmstate == PSTATE_UNDEF) &&
198 (pp->state == PATH_UP || pp->state == PATH_GHOST))
199 dm_reinstate_path(mpp->alias, pp->dev_t);
200 else if ((pp->dmstate == PSTATE_ACTIVE ||
201 pp->dmstate == PSTATE_UNDEF) &&
202 (pp->state == PATH_DOWN ||
203 pp->state == PATH_SHAKY))
204 dm_fail_path(mpp->alias, pp->dev_t);
/* sync_maps_state(): apply sync_map_state() to every map in mpvec. */
210 sync_maps_state(vector mpvec)
213 struct multipath *mpp;
215 vector_foreach_slot (mpvec, mpp, i)
/*
 * flush_map(): remove mpp from the device-mapper, then orphan its
 * paths and drop it from our vectors (stopping its waiter thread).
 * A dm_flush_map() failure may be benign (map already gone).
 */
220 flush_map(struct multipath * mpp, struct vectors * vecs)
223 * clear references to this map before flushing so we can ignore
224 * the spurious uevent we may generate with the dm_flush_map call below
226 if (dm_flush_map(mpp->alias)) {
228 * May not really be an error -- if the map was already flushed
229 * from the device mapper by dmsetup(8) for instance.
231 condlog(0, "%s: can't flush", mpp->alias);
236 condlog(2, "%s: map flushed", mpp->alias);
239 orphan_paths(vecs->pathvec, mpp);
240 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_add_map(): uevent wrapper for map addition. Resolves the map
 * alias either from DM_NAME in the uevent or, failing that, from the
 * major:minor via dm_mapname(), then delegates to ev_add_map().
 * NOTE(review): extract is line-sampled; error paths/frees not visible.
 */
246 uev_add_map (struct uevent * uev, struct vectors * vecs)
249 int major = -1, minor = -1, rc;
251 condlog(3, "%s: add map (uevent)", uev->kernel);
252 alias = uevent_get_dm_name(uev);
254 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
255 major = uevent_get_major(uev);
256 minor = uevent_get_minor(uev);
257 alias = dm_mapname(major, minor);
259 condlog(2, "%s: mapname not found for %d:%d",
260 uev->kernel, major, minor);
264 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * ev_add_map(): register a device-mapper map with the daemon. Skips
 * non-multipath targets; if the map is already known, optionally
 * dm_reassign()s it. Otherwise either adopts the existing dm map
 * (add_map_without_path) or builds one from scratch via get_refwwid()
 * + coalesce_paths().
 */
270 ev_add_map (char * dev, char * alias, struct vectors * vecs)
273 struct multipath * mpp;
277 map_present = dm_map_present(alias);
279 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
280 condlog(4, "%s: not a multipath map", alias);
284 mpp = find_mp_by_alias(vecs->mpvec, alias);
288 * Not really an error -- we generate our own uevent
289 * if we create a multipath mapped device as a result
292 if (conf->reassign_maps) {
293 condlog(3, "%s: Reassign existing device-mapper devices",
299 condlog(2, "%s: adding map", alias);
302 * now we can register the map
304 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
306 condlog(2, "%s: devmap %s registered", alias, dev);
309 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
312 r = coalesce_paths(vecs, NULL, refwwid, 0);
317 condlog(2, "%s: devmap %s added", alias, dev);
319 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
321 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * uev_remove_map(): uevent wrapper for map removal. Looks the map up
 * by minor number, cross-checks the alias against DM_NAME to catch
 * stale events, then orphans paths and drops the map.
 */
328 uev_remove_map (struct uevent * uev, struct vectors * vecs)
332 struct multipath *mpp;
334 condlog(2, "%s: remove map (uevent)", uev->kernel);
335 alias = uevent_get_dm_name(uev);
337 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
340 minor = uevent_get_minor(uev);
341 mpp = find_mp_by_minor(vecs->mpvec, minor);
344 condlog(2, "%s: devmap not registered, can't remove",
348 if (strcmp(mpp->alias, alias)) {
349 condlog(2, "%s: minor number mismatch (map %d, event %d)",
350 mpp->alias, mpp->dmi->minor, minor);
354 orphan_paths(vecs->pathvec, mpp);
355 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * ev_remove_map(): CLI-driven map removal; same minor/alias sanity
 * check as uev_remove_map(), but flushes the map from dm as well.
 */
362 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
364 struct multipath * mpp;
366 mpp = find_mp_by_minor(vecs->mpvec, minor);
369 condlog(2, "%s: devmap not registered, can't remove",
373 if (strcmp(mpp->alias, alias)) {
374 condlog(2, "%s: minor number mismatch (map %d, event %d)",
375 mpp->alias, mpp->dmi->minor, minor);
378 return flush_map(mpp, vecs);
/*
 * uev_add_path(): uevent wrapper for path addition. Rejects relative
 * device names, re-initializes a path already in pathvec whose wwid is
 * still empty, otherwise stores fresh path info and hands off to
 * ev_add_path().
 * NOTE(review): extract is line-sampled; braces, returns and some
 * branches are missing from this view.
 */
382 uev_add_path (struct uevent *uev, struct vectors * vecs)
387 condlog(2, "%s: add path (uevent)", uev->kernel);
388 if (strstr(uev->kernel, "..") != NULL) {
390 * Don't allow relative device names in the pathvec
392 condlog(0, "%s: path name is invalid", uev->kernel);
396 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
398 condlog(0, "%s: spurious uevent, path already in pathvec",
402 if (!strlen(pp->wwid)) {
/* refresh the udev reference before re-probing the path */
403 udev_device_unref(pp->udev);
404 pp->udev = udev_device_ref(uev->udev);
405 ret = pathinfo(pp, conf->hwtable,
406 DI_ALL | DI_BLACKLIST);
408 i = find_slot(vecs->pathvec, (void *)pp);
410 vector_del_slot(vecs->pathvec, i);
413 } else if (ret == 1) {
414 condlog(0, "%s: failed to reinitialize path",
421 * get path vital state
423 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
424 uev->udev, DI_ALL, &pp);
428 condlog(0, "%s: failed to store path info",
432 pp->checkint = conf->checkint;
435 return ev_add_path(pp, vecs);
/*
 * ev_add_path(): integrate a discovered path into its multipath map.
 * Requires a valid wwid; finds (or creates) the owning map, checks
 * for size mismatches, runs the persistent-reservation hook, then
 * setup_map()/domap() to push the new table to the device-mapper,
 * retrying reloads to cope with asynchronous uevents.
 */
444 ev_add_path (struct path * pp, struct vectors * vecs)
446 struct multipath * mpp;
447 char empty_buff[WWID_SIZE] = {0};
448 char params[PARAMS_SIZE] = {0};
450 int start_waiter = 0;
453 * need path UID to go any further
455 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
456 condlog(0, "%s: failed to get path uid", pp->dev);
457 goto fail; /* leave path added to pathvec */
459 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* reject paths whose size is zero or disagrees with the map */
462 if ((!pp->size) || (mpp->size != pp->size)) {
464 condlog(0, "%s: failed to add new path %s, "
466 mpp->alias, pp->dev);
468 condlog(0, "%s: failed to add new path %s, "
469 "device size mismatch",
470 mpp->alias, pp->dev);
471 int i = find_slot(vecs->pathvec, (void *)pp);
473 vector_del_slot(vecs->pathvec, i);
478 condlog(4,"%s: adopting all paths for path %s",
479 mpp->alias, pp->dev);
480 if (adopt_paths(vecs->pathvec, mpp, 1))
481 goto fail; /* leave path added to pathvec */
483 verify_paths(mpp, vecs, NULL);
484 mpp->flush_on_last_del = FLUSH_UNDEF;
485 mpp->action = ACT_RELOAD;
489 condlog(0, "%s: failed to create new map,"
490 " device size is 0 ", pp->dev);
491 int i = find_slot(vecs->pathvec, (void *)pp);
493 vector_del_slot(vecs->pathvec, i);
/* no existing map for this wwid: create one around this path */
498 condlog(4,"%s: creating new map", pp->dev);
499 if ((mpp = add_map_with_path(vecs, pp, 1))) {
500 mpp->action = ACT_CREATE;
502 * We don't depend on ACT_CREATE, as domap will
503 * set it to ACT_NOTHING when complete.
508 goto fail; /* leave path added to pathvec */
511 /* persistent reseravtion check*/
512 mpath_pr_event_handle(pp);
515 * push the map to the device-mapper
517 if (setup_map(mpp, params, PARAMS_SIZE)) {
518 condlog(0, "%s: failed to setup map for addition of new "
519 "path %s", mpp->alias, pp->dev);
523 * reload the map for the multipath mapped device
525 if (domap(mpp, params) <= 0) {
526 condlog(0, "%s: failed in domap for addition of new "
527 "path %s", mpp->alias, pp->dev);
529 * deal with asynchronous uevents :((
531 if (mpp->action == ACT_RELOAD && retries-- > 0) {
532 condlog(0, "%s: uev_add_path sleep", mpp->alias);
534 update_mpp_paths(mpp, vecs->pathvec);
537 else if (mpp->action == ACT_RELOAD)
538 condlog(0, "%s: giving up reload", mpp->alias);
545 * update our state from kernel regardless of create or reload
547 if (setup_multipath(vecs, mpp))
548 goto fail; /* if setup_multipath fails, it removes the map */
552 if ((mpp->action == ACT_CREATE ||
553 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
554 start_waiter_thread(mpp, vecs))
558 condlog(2, "%s [%s]: path added to devmap %s",
559 pp->dev, pp->dev_t, mpp->alias);
566 remove_map(mpp, vecs, 1);
568 orphan_path(pp, "failed to add path");
/*
 * uev_remove_path(): uevent wrapper for path removal; a missing path
 * is not an error (it may have been purged earlier).
 */
573 uev_remove_path (struct uevent *uev, struct vectors * vecs)
577 condlog(2, "%s: remove path (uevent)", uev->kernel);
578 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
581 /* Not an error; path might have been purged earlier */
582 condlog(0, "%s: path already removed", uev->kernel);
586 return ev_remove_path(pp, vecs);
/*
 * ev_remove_path(): remove a path from its map and from pathvec.
 * If it was the map's last path, optionally disables queueing
 * (flush_on_last_del) and flushes the whole map; otherwise rebuilds
 * the dm table without the path via setup_map()/domap().
 * NOTE(review): extract is line-sampled; braces, returns and the fail
 * label body are missing from this view.
 */
590 ev_remove_path (struct path *pp, struct vectors * vecs)
592 struct multipath * mpp;
594 char params[PARAMS_SIZE] = {0};
597 * avoid referring to the map of an orphaned path
599 if ((mpp = pp->mpp)) {
601 * transform the mp->pg vector of vectors of paths
602 * into a mp->params string to feed the device-mapper
604 if (update_mpp_paths(mpp, vecs->pathvec)) {
605 condlog(0, "%s: failed to update paths",
609 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
610 vector_del_slot(mpp->paths, i);
613 * remove the map IFF removing the last path
615 if (VECTOR_SIZE(mpp->paths) == 0) {
616 char alias[WWID_SIZE];
619 * flush_map will fail if the device is open
/* NOTE(review): strncpy may leave alias unterminated if
 * mpp->alias is >= WWID_SIZE chars -- assumed bounded upstream */
621 strncpy(alias, mpp->alias, WWID_SIZE);
622 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
623 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
625 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
626 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
627 dm_queue_if_no_path(mpp->alias, 0);
629 if (!flush_map(mpp, vecs)) {
630 condlog(2, "%s: removed map after"
631 " removing all paths",
637 * Not an error, continue
641 if (setup_map(mpp, params, PARAMS_SIZE)) {
642 condlog(0, "%s: failed to setup map for"
643 " removal of path %s", mpp->alias, pp->dev);
649 mpp->action = ACT_RELOAD;
650 if (domap(mpp, params) <= 0) {
651 condlog(0, "%s: failed in domap for "
652 "removal of path %s",
653 mpp->alias, pp->dev);
657 * update our state from kernel
659 if (setup_multipath(vecs, mpp)) {
664 condlog(2, "%s [%s]: path removed from map %s",
665 pp->dev, pp->dev_t, mpp->alias);
670 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
671 vector_del_slot(vecs->pathvec, i);
678 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_update_path(): handle a "change" uevent for a path; currently
 * only reacts to DISK_RO changes by reloading the owning map.
 */
683 uev_update_path (struct uevent *uev, struct vectors * vecs)
687 ro = uevent_get_disk_ro(uev);
692 condlog(2, "%s: update path write_protect to '%d' (uevent)",
694 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
696 condlog(0, "%s: spurious uevent, path not found",
701 retval = reload_map(vecs, pp->mpp, 0);
703 condlog(2, "%s: map %s reloaded (retval %d)",
704 uev->kernel, pp->mpp->alias, retval);
/*
 * map_discovery(): pull current maps out of the device-mapper into
 * vecs->mpvec and set each one up.
 * NOTE(review): extract is line-sampled throughout this region.
 */
713 map_discovery (struct vectors * vecs)
715 struct multipath * mpp;
718 if (dm_get_maps(vecs->mpvec))
721 vector_foreach_slot (vecs->mpvec, mpp, i)
722 if (setup_multipath(vecs, mpp))
/*
 * uxsock_trigger(): unix-socket command callback. Runs parse_cmd()
 * under the vecs lock (with cancellation-safe cleanup) and fills in a
 * default "fail"/"ok" reply when the handler produced none.
 */
729 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
731 struct vectors * vecs;
736 vecs = (struct vectors *)trigger_data;
738 pthread_cleanup_push(cleanup_lock, &vecs->lock);
740 pthread_testcancel();
742 r = parse_cmd(str, reply, len, vecs);
745 *reply = STRDUP("fail\n");
746 *len = strlen(*reply) + 1;
749 else if (!r && *len == 0) {
750 *reply = STRDUP("ok\n");
751 *len = strlen(*reply) + 1;
754 /* else if (r < 0) leave *reply alone */
756 lock_cleanup_pop(vecs->lock);
/*
 * uev_discard(): filter out uevents we don't care about -- anything
 * without "/block/" in its devpath, and partition events (a second
 * path component after /block/<dev>).
 */
761 uev_discard(char * devpath)
767 * keep only block devices, discard partitions
769 tmp = strstr(devpath, "/block/");
771 condlog(4, "no /block/ in '%s'", devpath);
774 if (sscanf(tmp, "/block/%10s", a) != 1 ||
775 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
776 condlog(4, "discard event on %s", devpath);
/*
 * uev_trigger(): uevent dispatcher. Under the vecs lock, routes dm-*
 * device events (change -> add map, remove -> remove map) and plain
 * path events (add/remove/change) to the handlers above, after
 * devnode blacklist filtering.
 */
783 uev_trigger (struct uevent * uev, void * trigger_data)
786 struct vectors * vecs;
788 vecs = (struct vectors *)trigger_data;
790 if (uev_discard(uev->devpath))
793 pthread_cleanup_push(cleanup_lock, &vecs->lock);
795 pthread_testcancel();
799 * Add events are ignored here as the tables
800 * are not fully initialised then.
802 if (!strncmp(uev->kernel, "dm-", 3)) {
803 if (!strncmp(uev->action, "change", 6)) {
804 r = uev_add_map(uev, vecs);
807 if (!strncmp(uev->action, "remove", 6)) {
808 r = uev_remove_map(uev, vecs);
815 * path add/remove event
817 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
821 if (!strncmp(uev->action, "add", 3)) {
822 r = uev_add_path(uev, vecs);
825 if (!strncmp(uev->action, "remove", 6)) {
826 r = uev_remove_path(uev, vecs);
829 if (!strncmp(uev->action, "change", 6)) {
830 r = uev_update_path(uev, vecs);
835 lock_cleanup_pop(vecs->lock);
/* ueventloop(): thread entry -- blocks in the udev uevent listener. */
840 ueventloop (void * ap)
842 if (uevent_listen(udev))
843 condlog(0, "error starting uevent listener");
/* uevqloop(): thread entry -- drains queued uevents via uev_trigger. */
851 if (uevent_dispatch(&uev_trigger, ap))
852 condlog(0, "error starting uevent dispatcher");
/*
 * uxlsnrloop(): thread entry for the CLI listener. Registers every
 * interactive command handler with the cli dispatcher, then blocks in
 * uxsock_listen() serving the multipathd control socket.
 * NOTE(review): extract is line-sampled; cli_init and cleanup are not
 * visible here.
 */
857 uxlsnrloop (void * ap)
862 set_handler_callback(LIST+PATHS, cli_list_paths);
863 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
864 set_handler_callback(LIST+MAPS, cli_list_maps);
865 set_handler_callback(LIST+STATUS, cli_list_status);
866 set_handler_callback(LIST+DAEMON, cli_list_daemon);
867 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
868 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
869 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
870 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
871 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
872 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
873 set_handler_callback(LIST+CONFIG, cli_list_config);
874 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
875 set_handler_callback(LIST+DEVICES, cli_list_devices);
876 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
877 set_handler_callback(ADD+PATH, cli_add_path);
878 set_handler_callback(DEL+PATH, cli_del_path);
879 set_handler_callback(ADD+MAP, cli_add_map);
880 set_handler_callback(DEL+MAP, cli_del_map);
881 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
882 set_handler_callback(RECONFIGURE, cli_reconfigure);
883 set_handler_callback(SUSPEND+MAP, cli_suspend);
884 set_handler_callback(RESUME+MAP, cli_resume);
885 set_handler_callback(RESIZE+MAP, cli_resize);
886 set_handler_callback(RELOAD+MAP, cli_reload);
887 set_handler_callback(RESET+MAP, cli_reassign);
888 set_handler_callback(REINSTATE+PATH, cli_reinstate);
889 set_handler_callback(FAIL+PATH, cli_fail);
890 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
891 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
892 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
893 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
894 set_handler_callback(QUIT, cli_quit);
895 set_handler_callback(SHUTDOWN, cli_shutdown);
896 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
897 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
898 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
899 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
900 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
903 uxsock_listen(&uxsock_trigger, ap);
/* NOTE(review): fragment of a function mapping running_state to a
 * printable/reply form -- enclosing definition not visible here. */
917 switch (running_state) {
922 case DAEMON_CONFIGURE:
926 case DAEMON_SHUTDOWN:
/*
 * fail_path(): proactively mark a path failed in the device-mapper and
 * update the map's queueing mode accordingly.
 * NOTE(review): extract is line-sampled throughout this region.
 */
933 fail_path (struct path * pp, int del_active)
938 condlog(2, "checker failed path %s in map %s",
939 pp->dev_t, pp->mpp->alias);
941 dm_fail_path(pp->mpp->alias, pp->dev_t);
943 update_queue_mode_del_path(pp->mpp);
947 * caller must have locked the path list before calling that function
/*
 * reinstate_path(): tell the device-mapper a path is usable again;
 * add_active controls whether queue-mode accounting is updated.
 */
950 reinstate_path (struct path * pp, int add_active)
955 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
956 condlog(0, "%s: reinstate failed", pp->dev_t);
958 condlog(2, "%s: reinstated", pp->dev_t);
960 update_queue_mode_add_path(pp->mpp);
/*
 * enable_group(): re-enable the path group containing pp if dm has it
 * disabled. Safe to bail early when pgindex is not yet set (see the
 * original comment below).
 */
965 enable_group(struct path * pp)
967 struct pathgroup * pgp;
970 * if path is added through uev_add_path, pgindex can be unset.
971 * next update_strings() will set it, upon map reload event.
973 * we can safely return here, because upon map reload, all
974 * PG will be enabled.
976 if (!pp->mpp->pg || !pp->pgindex)
979 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
981 if (pgp->status == PGSTATE_DISABLED) {
982 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
983 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * mpvec_garbage_collector(): drop registered maps whose dm device no
 * longer exists.
 */
988 mpvec_garbage_collector (struct vectors * vecs)
990 struct multipath * mpp;
996 vector_foreach_slot (vecs->mpvec, mpp, i) {
997 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
998 condlog(2, "%s: remove dead map", mpp->alias);
999 remove_map_and_stop_waiter(mpp, vecs, 1);
1005 /* This is called after a path has started working again. It the multipath
1006 * device for this path uses the followover failback type, and this is the
1007 * best pathgroup, and this is the first path in the pathgroup to come back
1008 * up, then switch to this pathgroup */
1010 followover_should_failback(struct path * pp)
1012 struct pathgroup * pgp;
1016 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1017 !pp->mpp->pg || !pp->pgindex ||
1018 pp->pgindex != pp->mpp->bestpg)
1021 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1022 vector_foreach_slot(pgp->paths, pp1, i) {
1025 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * defered_failback_tick(): count down per-map failback timers; when
 * one expires and a better group exists, switch to it.
 */
1032 defered_failback_tick (vector mpvec)
1034 struct multipath * mpp;
1037 vector_foreach_slot (mpvec, mpp, i) {
1039 * defered failback getting sooner
1041 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1042 mpp->failback_tick--;
1044 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1045 switch_pathgroup(mpp);
/*
 * retry_count_tick(): count down no_path_retry timers; on expiry,
 * stop queueing I/O for the map.
 */
1051 retry_count_tick(vector mpvec)
1053 struct multipath *mpp;
1056 vector_foreach_slot (mpvec, mpp, i) {
1057 if (mpp->retry_tick) {
1058 mpp->stat_total_queueing_time++;
1059 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1060 if(--mpp->retry_tick == 0) {
1061 dm_queue_if_no_path(mpp->alias, 0);
1062 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * update_prio(): refresh path priorities -- all paths of the map when
 * refresh_all is set, otherwise just pp -- and report whether any
 * priority changed.
 */
1068 int update_prio(struct path *pp, int refresh_all)
1072 struct pathgroup * pgp;
1073 int i, j, changed = 0;
1076 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1077 vector_foreach_slot (pgp->paths, pp1, j) {
1078 oldpriority = pp1->priority;
1079 pathinfo(pp1, conf->hwtable, DI_PRIO);
1080 if (pp1->priority != oldpriority)
1086 oldpriority = pp->priority;
1087 pathinfo(pp, conf->hwtable, DI_PRIO);
1089 if (pp->priority == oldpriority)
/*
 * update_path_groups(): reload the map's dm table, resync our view of
 * it, and push path states back to the kernel.
 */
1094 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1096 if (reload_map(vecs, mpp, refresh))
1100 if (setup_multipath(vecs, mpp) != 0)
1102 sync_map_state(mpp);
/*
 * check_path(): the heart of the checker loop for a single path.
 * Runs the path checker when the path's tick expires, reconciles the
 * new state with the kernel (fail/reinstate), adapts the check
 * interval (exponential backoff up to max_checkint while healthy),
 * and drives priority refresh / failback decisions.
 * NOTE(review): extract is line-sampled; braces, returns and several
 * statements are missing from this view, so the control flow shown
 * here is not complete.
 */
1108 check_path (struct vectors * vecs, struct path * pp)
1111 int new_path_up = 0;
1112 int chkr_new_path_up = 0;
1113 int oldchkrstate = pp->chkrstate;
1118 if (pp->tick && --pp->tick)
1119 return 0; /* don't check this path yet */
1122 * provision a next check soonest,
1123 * in case we exit abnormaly from here
1125 pp->tick = conf->checkint;
1127 newstate = path_offline(pp);
1128 if (newstate == PATH_REMOVED) {
1129 condlog(2, "%s: remove path (checker)", pp->dev);
1130 ev_remove_path(pp, vecs);
1133 if (newstate == PATH_UP)
1134 newstate = get_state(pp, 1);
1136 checker_clear_message(&pp->checker);
1138 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1139 condlog(2, "%s: unusable path", pp->dev);
1140 pathinfo(pp, conf->hwtable, 0);
1144 * Async IO in flight. Keep the previous path state
1145 * and reschedule as soon as possible
1147 if (newstate == PATH_PENDING) {
1152 * Synchronize with kernel state
1154 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1155 condlog(1, "%s: Could not synchronize with kernel state",
1157 pp->dmstate = PSTATE_UNDEF;
1159 pp->chkrstate = newstate;
1160 if (newstate != pp->state) {
1161 int oldstate = pp->state;
1162 pp->state = newstate;
1164 if (strlen(checker_message(&pp->checker)))
1165 LOG_MSG(1, checker_message(&pp->checker));
1168 * upon state change, reset the checkint
1169 * to the shortest delay
1171 pp->checkint = conf->checkint;
1173 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1175 * proactively fail path in the DM
1177 if (oldstate == PATH_UP ||
1178 oldstate == PATH_GHOST)
1184 * cancel scheduled failback
1186 pp->mpp->failback_tick = 0;
1188 pp->mpp->stat_path_failures++;
1192 if(newstate == PATH_UP || newstate == PATH_GHOST){
1193 if ( pp->mpp && pp->mpp->prflag ){
1195 * Check Persistent Reservation.
1197 condlog(2, "%s: checking persistent reservation "
1198 "registration", pp->dev);
1199 mpath_pr_event_handle(pp);
1204 * reinstate this path
1206 if (oldstate != PATH_UP &&
1207 oldstate != PATH_GHOST)
1208 reinstate_path(pp, 1);
1210 reinstate_path(pp, 0);
1214 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1215 chkr_new_path_up = 1;
1218 * if at least one path is up in a group, and
1219 * the group is disabled, re-enable it
1221 if (newstate == PATH_UP)
1224 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1225 if (pp->dmstate == PSTATE_FAILED ||
1226 pp->dmstate == PSTATE_UNDEF) {
1227 /* Clear IO errors */
1228 reinstate_path(pp, 0);
1230 LOG_MSG(4, checker_message(&pp->checker));
/* healthy and unchanged: back off the next check, capped */
1231 if (pp->checkint != conf->max_checkint) {
1233 * double the next check delay.
1234 * max at conf->max_checkint
1236 if (pp->checkint < (conf->max_checkint / 2))
1237 pp->checkint = 2 * pp->checkint;
1239 pp->checkint = conf->max_checkint;
1241 condlog(4, "%s: delay next check %is",
1242 pp->dev_t, pp->checkint);
1244 pp->tick = pp->checkint;
1247 else if (newstate == PATH_DOWN &&
1248 strlen(checker_message(&pp->checker))) {
1249 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1250 LOG_MSG(3, checker_message(&pp->checker));
1252 LOG_MSG(2, checker_message(&pp->checker));
1255 pp->state = newstate;
1258 * path prio refreshing
1260 condlog(4, "path prio refresh");
1262 if (update_prio(pp, new_path_up) &&
1263 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1264 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1265 update_path_groups(pp->mpp, vecs, !new_path_up);
1266 else if (need_switch_pathgroup(pp->mpp, 0)) {
1267 if (pp->mpp->pgfailback > 0 &&
1268 (new_path_up || pp->mpp->failback_tick <= 0))
1269 pp->mpp->failback_tick =
1270 pp->mpp->pgfailback + 1;
1271 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1272 (chkr_new_path_up && followover_should_failback(pp)))
1273 switch_pathgroup(pp->mpp);
/*
 * checkerloop(): thread entry for the periodic path checker. Locks
 * memory, initializes each path's check interval, then loops forever:
 * under the vecs lock it checks every path, ticks failback and retry
 * counters, garbage-collects dead maps, pets the systemd watchdog,
 * and logs how long one pass took.
 * NOTE(review): extract is line-sampled; the loop construct, lock
 * calls and sleep are not all visible here.
 */
1279 checkerloop (void *ap)
1281 struct vectors *vecs;
1286 mlockall(MCL_CURRENT | MCL_FUTURE);
1287 vecs = (struct vectors *)ap;
1288 condlog(2, "path checkers start up");
1291 * init the path check interval
1293 vector_foreach_slot (vecs->pathvec, pp, i) {
1294 pp->checkint = conf->checkint;
1298 struct timeval diff_time, start_time, end_time;
1301 if (gettimeofday(&start_time, NULL) != 0)
1302 start_time.tv_sec = 0;
1303 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1305 pthread_testcancel();
1309 sd_notify(0, "WATCHDOG=1");
1311 if (vecs->pathvec) {
1312 vector_foreach_slot (vecs->pathvec, pp, i) {
1313 num_paths += check_path(vecs, pp);
1317 defered_failback_tick(vecs->mpvec);
1318 retry_count_tick(vecs->mpvec);
1323 condlog(4, "map garbage collection");
1324 mpvec_garbage_collector(vecs);
1328 lock_cleanup_pop(vecs->lock);
1329 if (start_time.tv_sec &&
1330 gettimeofday(&end_time, NULL) == 0 &&
1332 timersub(&end_time, &start_time, &diff_time);
/* NOTE(review): plural logic looks inverted -- "%s" gets "s" when
 * num_paths > 1 but also prints "path" for 0; harmless, log-only. */
1333 condlog(3, "checked %d path%s in %lu.%06lu secs",
1334 num_paths, num_paths > 1 ? "s" : "",
1335 diff_time.tv_sec, diff_time.tv_usec);
/*
 * configure(): full (re)configuration pass. Discovers paths from
 * sysfs and maps from dm, filters blacklisted paths, coalesces paths
 * into a new map vector, removes now-disallowed maps, syncs states,
 * swaps in the new mpvec, and optionally starts dm-event waiter
 * threads for each map.
 * NOTE(review): extract is line-sampled; returns and error paths are
 * missing from this view.
 */
1343 configure (struct vectors * vecs, int start_waiters)
1345 struct multipath * mpp;
1350 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1353 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1356 if (!(mpvec = vector_alloc()))
1360 * probe for current path (from sysfs) and map (from dm) sets
1362 path_discovery(vecs->pathvec, conf, DI_ALL);
1364 vector_foreach_slot (vecs->pathvec, pp, i){
1365 if (filter_path(conf, pp) > 0){
1366 vector_del_slot(vecs->pathvec, i);
1371 pp->checkint = conf->checkint;
1373 if (map_discovery(vecs))
1377 * create new set of maps & push changed ones into dm
1379 if (coalesce_paths(vecs, mpvec, NULL, 1))
1383 * may need to remove some maps which are no longer relevant
1384 * e.g., due to blacklist changes in conf file
1386 if (coalesce_maps(vecs, mpvec))
1391 sync_maps_state(mpvec);
1392 vector_foreach_slot(mpvec, mpp, i){
1393 remember_wwid(mpp->wwid);
1398 * purge dm of old maps
1403 * save new set of maps formed by considering current path state
1405 vector_free(vecs->mpvec);
1406 vecs->mpvec = mpvec;
1409 * start dm event waiter threads for these new maps
1411 vector_foreach_slot(vecs->mpvec, mpp, i) {
1412 if (setup_multipath(vecs, mpp))
1415 if (start_waiter_thread(mpp, vecs))
/*
 * reconfigure(): tear down all maps and paths built under the old
 * config, reload multipath.conf, and re-run configure().
 */
1422 reconfigure (struct vectors * vecs)
1424 struct config * old = conf;
1428 * free old map and path vectors ... they use old conf state
1430 if (VECTOR_SIZE(vecs->mpvec))
1431 remove_maps_and_stop_waiters(vecs);
1433 if (VECTOR_SIZE(vecs->pathvec))
1434 free_pathvec(vecs->pathvec, FREE_PATHS);
1436 vecs->pathvec = NULL;
1439 /* Re-read any timezone changes */
1442 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
/* carry the command-line verbosity over into the fresh config */
1443 conf->verbosity = old->verbosity;
/*
 * init_vecs(): allocate the global vectors container and its mutex.
 */
1453 static struct vectors *
1456 struct vectors * vecs;
1458 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1464 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1466 if (!vecs->lock.mutex)
1469 pthread_mutex_init(vecs->lock.mutex, NULL);
1470 vecs->lock.depth = 0;
1476 condlog(0, "failed to init paths");
/*
 * signal_set(): sigaction() wrapper installing func for signo and
 * returning the previous handler (SIG_ERR semantics on failure are in
 * lines not visible in this extract).
 * NOTE(review): extract is line-sampled throughout this region.
 */
1481 signal_set(int signo, void (*func) (int))
1484 struct sigaction sig;
1485 struct sigaction osig;
1487 sig.sa_handler = func;
1488 sigemptyset(&sig.sa_mask);
1491 r = sigaction(signo, &sig, &osig);
1496 return (osig.sa_handler);
/*
 * handle_signals(): act on flags raised by async signal handlers --
 * run reconfigure under the gvecs lock on SIGHUP, reset the log area
 * on SIGUSR1 (flags cleared in lines not shown).
 */
1500 handle_signals(void)
1502 if (reconfig_sig && running_state == DAEMON_RUNNING) {
1503 condlog(2, "reconfigure (signal)");
1504 pthread_cleanup_push(cleanup_lock,
1507 pthread_testcancel();
1509 lock_cleanup_pop(gvecs->lock);
1511 if (log_reset_sig) {
1512 condlog(2, "reset log (signal)");
1513 pthread_mutex_lock(&logq_lock);
1514 log_reset("multipathd");
1515 pthread_mutex_unlock(&logq_lock);
/* sigusr2 handler body fragment */
1542 condlog(3, "SIGUSR2 received");
/* signal setup: block HUP/USR1/USR2 in all threads, install handlers,
 * ignore SIGPIPE so socket writes fail with EPIPE instead of killing us */
1551 sigaddset(&set, SIGHUP);
1552 sigaddset(&set, SIGUSR1);
1553 sigaddset(&set, SIGUSR2);
1554 pthread_sigmask(SIG_BLOCK, &set, NULL);
1556 signal_set(SIGHUP, sighup);
1557 signal_set(SIGUSR1, sigusr1);
1558 signal_set(SIGUSR2, sigusr2);
1559 signal_set(SIGINT, sigend);
1560 signal_set(SIGTERM, sigend);
1561 signal(SIGPIPE, SIG_IGN);
/* real-time scheduling setup: SCHED_RR at max priority, best-effort */
1568 static struct sched_param sched_param = {
1569 .sched_priority = 99
1572 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1575 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * set_oom_adj(): make the daemon unkillable by the OOM killer.
 * Prefers /proc/self/oom_score_adj (OOM_SCORE_ADJ_MIN), falling back
 * to the legacy /proc/self/oom_adj interface; skipped entirely when
 * systemd provides OOMScoreAdjust.
 */
1582 #ifdef OOM_SCORE_ADJ_MIN
1584 char *file = "/proc/self/oom_score_adj";
1585 int score = OOM_SCORE_ADJ_MIN;
1588 char *file = "/proc/self/oom_adj";
1589 int score = OOM_ADJUST_MIN;
1595 envp = getenv("OOMScoreAdjust");
1597 condlog(3, "Using systemd provided OOMScoreAdjust");
1601 if (stat(file, &st) == 0){
1602 fp = fopen(file, "w");
1604 condlog(0, "couldn't fopen %s : %s", file,
1608 fprintf(fp, "%i", score);
1612 if (errno != ENOENT) {
1613 condlog(0, "couldn't stat %s : %s", file,
1617 #ifdef OOM_ADJUST_MIN
1618 file = "/proc/self/oom_adj";
1619 score = OOM_ADJUST_MIN;
1624 condlog(0, "couldn't adjust oom score");
/*
 * child(): the daemon's main routine (runs in the daemonized child).
 * Sequence: lock memory, start logging, load config, init checkers
 * and prioritizers, apply fd limits, init vectors, honor the systemd
 * watchdog interval, spawn the uevent/cli/checker/dispatcher threads,
 * run configure(), write the pidfile, announce READY, then block on
 * exit_sem until shutdown -- at which point it unwinds everything in
 * reverse (disable queueing, remove maps, cancel threads, free
 * vectors, unlink pidfile).
 * NOTE(review): extract is line-sampled; labels, frees, and several
 * statements are missing from this view.
 */
1628 child (void * param)
1630 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1631 pthread_attr_t log_attr, misc_attr, uevent_attr;
1632 struct vectors * vecs;
1633 struct multipath * mpp;
1636 unsigned long checkint;
1641 mlockall(MCL_CURRENT | MCL_FUTURE);
1642 sem_init(&exit_sem, 0, 0);
1647 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1648 setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1649 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1652 setup_thread_attr(&log_attr, 64 * 1024, 0);
1653 log_thread_start(&log_attr);
1654 pthread_attr_destroy(&log_attr);
1657 running_state = DAEMON_START;
1660 sd_notify(0, "STATUS=startup");
1662 condlog(2, "--------start up--------");
1663 condlog(2, "read " DEFAULT_CONFIGFILE);
1665 if (load_config(DEFAULT_CONFIGFILE, udev))
1668 if (init_checkers()) {
1669 condlog(0, "failed to initialize checkers");
1673 condlog(0, "failed to initialize prioritizers");
1677 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* fd limit: trust systemd's LimitNOFILE if present, else raise
 * RLIMIT_NOFILE to conf->max_fds ourselves */
1679 envp = getenv("LimitNOFILE");
1682 condlog(2,"Using systemd provided open fds limit of %s", envp);
1683 } else if (conf->max_fds) {
1684 struct rlimit fd_limit;
1686 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1687 condlog(0, "can't get open fds limit: %s",
1689 fd_limit.rlim_cur = 0;
1690 fd_limit.rlim_max = 0;
1692 if (fd_limit.rlim_cur < conf->max_fds) {
1693 fd_limit.rlim_cur = conf->max_fds;
1694 if (fd_limit.rlim_max < conf->max_fds)
1695 fd_limit.rlim_max = conf->max_fds;
1696 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1697 condlog(0, "can't set open fds limit to "
1699 fd_limit.rlim_cur, fd_limit.rlim_max,
1702 condlog(3, "set open fds limit to %lu/%lu",
1703 fd_limit.rlim_cur, fd_limit.rlim_max);
1709 vecs = gvecs = init_vecs();
1717 udev_set_sync_support(0);
/* systemd watchdog: derive checkint from WATCHDOG_USEC so every
 * checker pass pets the watchdog in time */
1719 envp = getenv("WATCHDOG_USEC");
1720 if (envp && sscanf(envp, "%lu", &checkint) == 1) {
1721 /* Value is in microseconds */
1722 conf->max_checkint = checkint / 1000000;
1723 /* Rescale checkint */
1724 if (conf->checkint > conf->max_checkint)
1725 conf->checkint = conf->max_checkint;
1727 conf->checkint = conf->max_checkint / 4;
1728 condlog(3, "enabling watchdog, interval %d max %d",
1729 conf->checkint, conf->max_checkint);
1730 conf->watchdog = conf->checkint;
1734 * Start uevent listener early to catch events
1736 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1737 condlog(0, "failed to create uevent thread: %d", rc);
1740 pthread_attr_destroy(&uevent_attr);
1741 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1742 condlog(0, "failed to create cli listener: %d", rc);
1746 * fetch and configure both paths and multipaths
1749 sd_notify(0, "STATUS=configure");
1751 running_state = DAEMON_CONFIGURE;
1754 if (configure(vecs, 1)) {
1756 condlog(0, "failure during configuration");
1764 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1765 condlog(0,"failed to create checker loop thread: %d", rc);
1768 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1769 condlog(0, "failed to create uevent dispatcher: %d", rc);
1772 pthread_attr_destroy(&misc_attr);
1774 /* Startup complete, create logfile */
1775 pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1776 /* Ignore errors, we can live without */
1778 running_state = DAEMON_RUNNING;
1780 sd_notify(0, "READY=1\nSTATUS=running");
/* park here until a signal/CLI shutdown posts exit_sem */
1786 while(sem_wait(&exit_sem) != 0); /* Do nothing */
1789 sd_notify(0, "STATUS=shutdown");
1791 running_state = DAEMON_SHUTDOWN;
1793 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1794 vector_foreach_slot(vecs->mpvec, mpp, i)
1795 dm_queue_if_no_path(mpp->alias, 0);
1796 remove_maps_and_stop_waiters(vecs);
1799 pthread_cancel(check_thr);
1800 pthread_cancel(uevent_thr);
1801 pthread_cancel(uxlsnr_thr);
1802 pthread_cancel(uevq_thr);
1805 free_pathvec(vecs->pathvec, FREE_PATHS);
1806 vecs->pathvec = NULL;
1808 /* Now all the waitevent threads will start rushing in. */
1809 while (vecs->lock.depth > 0) {
1810 sleep (1); /* This is weak. */
1811 condlog(3, "Have %d wait event checkers threads to de-alloc,"
1812 " waiting...", vecs->lock.depth);
1814 pthread_mutex_destroy(vecs->lock.mutex);
1815 FREE(vecs->lock.mutex);
1816 vecs->lock.depth = 0;
1817 vecs->lock.mutex = NULL;
1827 /* We're done here */
1829 condlog(3, "unlink pidfile");
1830 unlink(DEFAULT_PIDFILE);
1833 condlog(2, "--------shut down-------");
1839 * Freeing config must be done after condlog() and dm_lib_exit(),
1840 * because logging functions like dlog() and dm_write_log()
1841 * reference the config.
1848 dbg_free_final(NULL);
1852 sd_notify(0, "ERRNO=0");
1858 sd_notify(0, "ERRNO=1");
/*
 * NOTE(review): interior of the daemonize helper -- its signature and the
 * surrounding control flow are outside this extract, so comments below are
 * hedged where elided lines matter.
 */
/* First fork: the parent exits (elided) so the child is reparented. */
1869 if( (pid = fork()) < 0){
1870 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
/* Second fork -- presumably after setsid() (elided) -- so the daemon can
 * never reacquire a controlling terminal; TODO confirm against full file. */
1878 if ( (pid = fork()) < 0)
1879 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
/* chdir("/") failure is non-fatal; the daemon continues. */
1884 fprintf(stderr, "cannot chdir to '/', continuing\n");
/* Open /dev/null once, then dup it over each standard stream in turn. */
1886 dev_null_fd = open("/dev/null", O_RDWR);
1887 if (dev_null_fd < 0){
1888 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* dup() returns the lowest free descriptor -- the one just closed. */
1893 close(STDIN_FILENO);
1894 if (dup(dev_null_fd) < 0) {
1895 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1899 close(STDOUT_FILENO);
1900 if (dup(dev_null_fd) < 0) {
1901 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1905 close(STDERR_FILENO);
1906 if (dup(dev_null_fd) < 0) {
1907 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
/* Remember the daemon's pid; used for the pidfile created in child(). */
1912 daemon_pid = getpid();
/*
 * main() -- parse command-line options, then run the daemon body via
 * child() (directly here; the daemonized path is elided from this view).
 * NOTE(review): this extract is gapped; elided lines are not annotated.
 */
1917 main (int argc, char *argv[])
1919 extern char *optarg;
/* Daemon state machine starts in INIT; child() advances it later. */
1925 running_state = DAEMON_INIT;
/* multipathd drives device-mapper: refuse to run as non-root. */
1928 if (getuid() != 0) {
1929 fprintf(stderr, "need to be root\n");
1933 /* make sure we don't lock any path */
1935 fprintf(stderr, "can't chdir to root directory : %s\n",
/* Keep the inherited user bits but always mask group/other write (022). */
1937 umask(umask(077) | 022);
1939 conf = alloc_config();
/* Options: -d (foreground), -s, -v <level>, -k [cli command]. */
1944 while ((arg = getopt(argc, argv, ":dsv:k::")) != EOF ) {
1948 //debug=1; /* ### comment me out ### */
/* NOTE(review): sizeof(optarg) is the size of the pointer itself, so the
 * first test can never be true; only isdigit(optarg[0]) actually
 * validates the -v argument -- TODO confirm intent and fix upstream. */
1951 if (sizeof(optarg) > sizeof(char *) ||
1952 !isdigit(optarg[0]))
1955 conf->verbosity = atoi(optarg);
/* Leftover arguments: concatenate them into one CLI command string. */
1967 if (optind < argc) {
1972 while (optind < argc) {
/* Quote arguments containing spaces so the CLI parser sees one token. */
1973 if (strchr(argv[optind], ' '))
1974 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1976 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
/* Terminate the assembled command with a newline for the CLI reader. */
1979 c += snprintf(c, s + CMDSIZE - c, "\n");
/* Foreground path: run the daemon body and return its exit status. */
1997 return (child(NULL));
/*
 * Thread entry point: re-establish this map's SCSI persistent reservation
 * registration on a (newly usable) path.  Reads registered keys with
 * PR IN / READ KEYS; if the map's reservation_key is present in the
 * response, registers it on this path with REGISTER AND IGNORE EXISTING
 * KEY.  Spawned from mpath_pr_event_handle().
 * NOTE(review): body is gapped in this extract (error 'out' paths and the
 * mpp assignment are elided); comments avoid claims about missing lines.
 */
2000 void * mpath_pr_event_handler_fn (void * pathp )
2002 struct multipath * mpp;
2003 int i,j, ret, isFound;
2004 struct path * pp = (struct path *)pathp;
2005 unsigned char *keyp;
2007 struct prout_param_descriptor *param;
2008 struct prin_resp *resp;
/* Allocate a response buffer sized for the READ KEYS service action. */
2012 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2014 condlog(0,"%s Alloc failed for prin response", pp->dev);
/* PR IN / READ KEYS issued on this specific path device. */
2018 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2019 if (ret != MPATH_PR_SUCCESS )
2021 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2025 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2026 resp->prin_descriptor.prin_readkeys.additional_length );
/* additional_length == 0: the device reported no registered keys at all. */
2028 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2030 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2031 ret = MPATH_PR_SUCCESS;
/* Fold the map's 8-byte reservation key into prkey, presumably
 * big-endian for the log line below -- loop body elided, TODO confirm. */
2035 keyp = (unsigned char *)mpp->reservation_key;
2036 for (j = 0; j < 8; ++j) {
2042 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* Key list entries are 8 bytes each; scan the response for our key. */
2045 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2047 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
2048 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2049 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2051 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
/* Our key is not registered on this device: skip re-registration. */
2058 condlog(0, "%s: Either device not registered or ", pp->dev);
2059 condlog(0, "host is not authorised for registration. Skip path");
2060 ret = MPATH_PR_OTHER;
/* Build the PR OUT parameter list carrying the service-action key.
 * NOTE(review): malloc() result is used unchecked -- the memset would
 * dereference NULL on allocation failure. */
2064 param= malloc(sizeof(struct prout_param_descriptor));
2065 memset(param, 0 , sizeof(struct prout_param_descriptor));
/* Serialize prkey big-endian into sa_key[0..7]. */
2067 for (j = 7; j >= 0; --j) {
2068 param->sa_key[j] = (prkey & 0xff);
2071 param->num_transportid = 0;
2073 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
/* REGISTER AND IGNORE EXISTING KEY: idempotent if already registered. */
2075 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2076 if (ret != MPATH_PR_SUCCESS )
2078 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2088 int mpath_pr_event_handle(struct path *pp)
2092 pthread_attr_t attr;
2093 struct multipath * mpp;
2097 if (!mpp->reservation_key)
2100 pthread_attr_init(&attr);
2101 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2103 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2105 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2108 pthread_attr_destroy(&attr);
2109 rc = pthread_join(thread, NULL);