2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
20 #include <semaphore.h>
21 #include <mpath_persist.h>
39 #include <blacklist.h>
40 #include <structs_vec.h>
42 #include <devmapper.h>
45 #include <discovery.h>
49 #include <switchgroup.h>
51 #include <configure.h>
53 #include <pgpolicies.h>
62 #include "cli_handlers.h"
/* Maximum length used for path/file name buffers in this daemon. */
67 #define FILE_NAME_SIZE 256
/*
 * LOG_MSG(a, b): log checker message b for path pp at verbosity a,
 * falling back to a "path offline" message.  NOTE(review): the macro
 * body is truncated in this excerpt; the branch selecting between the
 * two condlog() calls is not visible.
 */
70 #define LOG_MSG(a, b) \
73 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
75 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
/* Parameter block handed to the persistent-reservation event thread. */
78 struct mpath_event_param
81 struct multipath *mpp;
84 unsigned int mpath_mx_alloc_len;
/* Current daemon lifecycle state; consulted by signal handlers too. */
87 enum daemon_status running_state;
/* Posted (e.g. from signal handlers) to make child() leave its wait loop. */
90 static sem_t exit_sem;
92 * global copy of vecs for use in sig handlers
94 struct vectors * gvecs;
/*
 * need_switch_pathgroup: decide whether mpp should fail back to another
 * path group.  Refreshes every path's priority (pathinfo with DI_PRIO),
 * recomputes the best group and compares it to the scheduled next group.
 * Maps configured for manual failback are never switched automatically.
 * NOTE(review): excerpt is truncated; return statements not visible.
 */
99 need_switch_pathgroup (struct multipath * mpp, int refresh)
101 struct pathgroup * pgp;
105 if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
109 * Refresh path priority values
112 vector_foreach_slot (mpp->pg, pgp, i)
113 vector_foreach_slot (pgp->paths, pp, j)
114 pathinfo(pp, conf->hwtable, DI_PRIO);
116 mpp->bestpg = select_path_group(mpp);
118 if (mpp->bestpg != mpp->nextpg)
/*
 * switch_pathgroup: make the kernel switch mpp to its best path group
 * (dm_switchgroup), bump the per-map switch statistic and log it.
 */
125 switch_pathgroup (struct multipath * mpp)
127 mpp->stat_switchgroup++;
128 dm_switchgroup(mpp->alias, mpp->bestpg);
129 condlog(2, "%s: switch to path group #%i",
130 mpp->alias, mpp->bestpg);
/*
 * coalesce_maps: reconcile the daemon's current map vector with the new
 * set nmpv.  Maps no longer allowed by the configuration are flushed
 * from device-mapper; maps that cannot be flushed (e.g. still open) are
 * carried over into nmpv instead.  With reassign_maps set, surviving
 * maps get their device-mapper dependencies reassigned.
 */
134 coalesce_maps(struct vectors *vecs, vector nmpv)
136 struct multipath * ompp;
137 vector ompv = vecs->mpvec;
141 vector_foreach_slot (ompv, ompp, i) {
142 condlog(3, "%s: coalesce map", ompp->alias);
143 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
145 * remove all current maps not allowed by the
146 * current configuration
148 if (dm_flush_map(ompp->alias)) {
149 condlog(0, "%s: unable to flush devmap",
/* Flush can fail simply because the device is still open: keep the map. */
152 * may be just because the device is open
154 if (!vector_alloc_slot(nmpv))
157 vector_set_slot(nmpv, ompp);
158 setup_multipath(vecs, ompp);
160 if ((j = find_slot(ompv, (void *)ompp)) != -1)
161 vector_del_slot(ompv, j);
167 condlog(2, "%s devmap removed", ompp->alias);
169 } else if (conf->reassign_maps) {
170 condlog(3, "%s: Reassign existing device-mapper"
171 " devices", ompp->alias);
172 dm_reassign(ompp->alias);
/*
 * sync_map_state: push the checker's view of each path in mpp into the
 * kernel.  Paths the checker sees as usable (UP/GHOST) but that the
 * kernel marks failed are reinstated; paths the checker sees as down
 * (DOWN/SHAKY) but that the kernel still uses are failed.  Paths whose
 * checker state is unknown (UNCHECKED/WILD) are left alone.
 */
179 sync_map_state(struct multipath *mpp)
181 struct pathgroup *pgp;
188 vector_foreach_slot (mpp->pg, pgp, i){
189 vector_foreach_slot (pgp->paths, pp, j){
190 if (pp->state == PATH_UNCHECKED ||
191 pp->state == PATH_WILD)
193 if ((pp->dmstate == PSTATE_FAILED ||
194 pp->dmstate == PSTATE_UNDEF) &&
195 (pp->state == PATH_UP || pp->state == PATH_GHOST))
196 dm_reinstate_path(mpp->alias, pp->dev_t);
197 else if ((pp->dmstate == PSTATE_ACTIVE ||
198 pp->dmstate == PSTATE_UNDEF) &&
199 (pp->state == PATH_DOWN ||
200 pp->state == PATH_SHAKY))
201 dm_fail_path(mpp->alias, pp->dev_t);
/* sync_maps_state: apply sync_map_state() to every map in mpvec. */
207 sync_maps_state(vector mpvec)
210 struct multipath *mpp;
212 vector_foreach_slot (mpvec, mpp, i)
/*
 * flush_map: remove mpp from device-mapper and drop the daemon's
 * bookkeeping for it (orphan its paths, stop its waiter thread).
 * A dm_flush_map() failure is logged but may be benign (map already
 * removed externally, e.g. by dmsetup).
 */
217 flush_map(struct multipath * mpp, struct vectors * vecs)
220 * clear references to this map before flushing so we can ignore
221 * the spurious uevent we may generate with the dm_flush_map call below
223 if (dm_flush_map(mpp->alias)) {
225 * May not really be an error -- if the map was already flushed
226 * from the device mapper by dmsetup(8) for instance.
228 condlog(0, "%s: can't flush", mpp->alias);
233 condlog(2, "%s: map flushed", mpp->alias);
236 orphan_paths(vecs->pathvec, mpp);
237 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_add_map: uevent wrapper for map addition.  Resolves the map alias
 * from DM_NAME, falling back to a major:minor lookup via dm_mapname(),
 * then delegates to ev_add_map().
 */
243 uev_add_map (struct uevent * uev, struct vectors * vecs)
246 int major = -1, minor = -1, rc;
248 condlog(3, "%s: add map (uevent)", uev->kernel);
249 alias = uevent_get_dm_name(uev);
251 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
252 major = uevent_get_major(uev);
253 minor = uevent_get_minor(uev);
254 alias = dm_mapname(major, minor);
256 condlog(2, "%s: mapname not found for %d:%d",
257 uev->kernel, major, minor);
261 rc = ev_add_map(uev->kernel, alias, vecs);
/*
 * ev_add_map: register a new multipath map with the daemon.  Non-multipath
 * dm devices are ignored.  If the map is already known, only dm-device
 * reassignment is performed (when configured).  Otherwise the map is
 * either adopted from an existing dm table (add_map_without_path) or
 * created by coalescing paths matching the device's reference wwid.
 */
267 ev_add_map (char * dev, char * alias, struct vectors * vecs)
270 struct multipath * mpp;
274 map_present = dm_map_present(alias);
276 if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
277 condlog(4, "%s: not a multipath map", alias);
281 mpp = find_mp_by_alias(vecs->mpvec, alias);
285 * Not really an error -- we generate our own uevent
286 * if we create a multipath mapped device as a result
289 if (conf->reassign_maps) {
290 condlog(3, "%s: Reassign existing device-mapper devices",
296 condlog(2, "%s: adding map", alias);
299 * now we can register the map
301 if (map_present && (mpp = add_map_without_path(vecs, alias))) {
303 condlog(2, "%s: devmap %s registered", alias, dev);
306 r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
309 r = coalesce_paths(vecs, NULL, refwwid, 0);
314 condlog(2, "%s: devmap %s added", alias, dev);
316 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
318 condlog(0, "%s: uev_add_map %s failed", alias, dev);
/*
 * uev_remove_map: uevent wrapper for map removal.  Looks the map up by
 * dm minor number, cross-checks its alias against DM_NAME to guard
 * against stale events, then orphans the paths and removes the map.
 */
325 uev_remove_map (struct uevent * uev, struct vectors * vecs)
329 struct multipath *mpp;
331 condlog(2, "%s: remove map (uevent)", uev->kernel);
332 alias = uevent_get_dm_name(uev);
334 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
337 minor = uevent_get_minor(uev);
338 mpp = find_mp_by_minor(vecs->mpvec, minor);
341 condlog(2, "%s: devmap not registered, can't remove",
/* Alias mismatch means the event refers to a different (recreated) map. */
345 if (strcmp(mpp->alias, alias)) {
346 condlog(2, "%s: minor number mismatch (map %d, event %d)",
347 mpp->alias, mpp->dmi->minor, minor);
351 orphan_paths(vecs->pathvec, mpp);
352 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * ev_remove_map: CLI-driven map removal.  Same minor/alias consistency
 * checks as uev_remove_map(), but flushes the map via flush_map(), so
 * the removal can fail if the device is still open.
 */
359 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
361 struct multipath * mpp;
363 mpp = find_mp_by_minor(vecs->mpvec, minor);
366 condlog(2, "%s: devmap not registered, can't remove",
370 if (strcmp(mpp->alias, alias)) {
371 condlog(2, "%s: minor number mismatch (map %d, event %d)",
372 mpp->alias, mpp->dmi->minor, minor);
375 return flush_map(mpp, vecs);
/*
 * uev_add_path: uevent wrapper for path addition.  Rejects relative
 * device names, re-initializes a previously stored path that still has
 * no wwid (dropping it from pathvec if pathinfo now blacklists it),
 * or discovers and stores a brand-new path, then hands the path to
 * ev_add_path() for map integration.
 */
379 uev_add_path (struct uevent *uev, struct vectors * vecs)
384 condlog(2, "%s: add path (uevent)", uev->kernel);
385 if (strstr(uev->kernel, "..") != NULL) {
387 * Don't allow relative device names in the pathvec
389 condlog(0, "%s: path name is invalid", uev->kernel);
393 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
395 condlog(0, "%s: spurious uevent, path already in pathvec",
/* Path known but wwid never resolved: retry full discovery on it. */
399 if (!strlen(pp->wwid)) {
400 udev_device_unref(pp->udev);
401 pp->udev = udev_device_ref(uev->udev);
402 ret = pathinfo(pp, conf->hwtable,
403 DI_ALL | DI_BLACKLIST);
405 i = find_slot(vecs->pathvec, (void *)pp);
407 vector_del_slot(vecs->pathvec, i);
410 } else if (ret == 1) {
411 condlog(0, "%s: failed to reinitialize path",
418 * get path vital state
420 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
421 uev->udev, DI_ALL, &pp);
425 condlog(0, "%s: failed to store path info",
429 pp->checkint = conf->checkint;
432 return ev_add_path(pp, vecs);
/*
 * ev_add_path: integrate path pp into its multipath map.  If a map with
 * the same wwid exists, the path is adopted into it (after a size
 * consistency check) and the map is reloaded; otherwise a new map is
 * created around the path.  On reload failure the operation is retried
 * a bounded number of times to cope with asynchronous uevents.
 * Requires a valid path wwid; on failure the path is left in pathvec
 * or orphaned, as noted at each exit.
 */
441 ev_add_path (struct path * pp, struct vectors * vecs)
443 struct multipath * mpp;
444 char empty_buff[WWID_SIZE] = {0};
445 char params[PARAMS_SIZE] = {0};
447 int start_waiter = 0;
450 * need path UID to go any further
452 if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
453 condlog(0, "%s: failed to get path uid", pp->dev);
454 goto fail; /* leave path added to pathvec */
456 mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* Reject the path when its size is zero or disagrees with the map's. */
459 if ((!pp->size) || (mpp->size != pp->size)) {
461 condlog(0, "%s: failed to add new path %s, "
463 mpp->alias, pp->dev);
465 condlog(0, "%s: failed to add new path %s, "
466 "device size mismatch",
467 mpp->alias, pp->dev);
468 int i = find_slot(vecs->pathvec, (void *)pp);
470 vector_del_slot(vecs->pathvec, i);
475 condlog(4,"%s: adopting all paths for path %s",
476 mpp->alias, pp->dev);
477 if (adopt_paths(vecs->pathvec, mpp, 1))
478 goto fail; /* leave path added to pathvec */
480 verify_paths(mpp, vecs, NULL);
481 mpp->flush_on_last_del = FLUSH_UNDEF;
482 mpp->action = ACT_RELOAD;
486 condlog(0, "%s: failed to create new map,"
487 " device size is 0 ", pp->dev);
488 int i = find_slot(vecs->pathvec, (void *)pp);
490 vector_del_slot(vecs->pathvec, i);
495 condlog(4,"%s: creating new map", pp->dev);
496 if ((mpp = add_map_with_path(vecs, pp, 1))) {
497 mpp->action = ACT_CREATE;
499 * We don't depend on ACT_CREATE, as domap will
500 * set it to ACT_NOTHING when complete.
505 goto fail; /* leave path added to pathvec */
508 /* persistent reseravtion check*/
509 mpath_pr_event_handle(pp);
512 * push the map to the device-mapper
514 if (setup_map(mpp, params, PARAMS_SIZE)) {
515 condlog(0, "%s: failed to setup map for addition of new "
516 "path %s", mpp->alias, pp->dev);
520 * reload the map for the multipath mapped device
522 if (domap(mpp, params) <= 0) {
523 condlog(0, "%s: failed in domap for addition of new "
524 "path %s", mpp->alias, pp->dev);
526 * deal with asynchronous uevents :((
528 if (mpp->action == ACT_RELOAD && retries-- > 0) {
529 condlog(0, "%s: uev_add_path sleep", mpp->alias);
531 update_mpp_paths(mpp, vecs->pathvec);
534 else if (mpp->action == ACT_RELOAD)
535 condlog(0, "%s: giving up reload", mpp->alias);
542 * update our state from kernel regardless of create or reload
544 if (setup_multipath(vecs, mpp))
545 goto fail; /* if setup_multipath fails, it removes the map */
/* Start the dm-event waiter thread for freshly created maps. */
549 if ((mpp->action == ACT_CREATE ||
550 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
551 start_waiter_thread(mpp, vecs))
555 condlog(2, "%s [%s]: path added to devmap %s",
556 pp->dev, pp->dev_t, mpp->alias);
563 remove_map(mpp, vecs, 1);
565 orphan_path(pp, "failed to add path");
/*
 * uev_remove_path: uevent wrapper for path removal.  A missing path is
 * not an error (it may have been purged already); otherwise delegate to
 * ev_remove_path().
 */
570 uev_remove_path (struct uevent *uev, struct vectors * vecs)
574 condlog(2, "%s: remove path (uevent)", uev->kernel);
575 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
578 /* Not an error; path might have been purged earlier */
579 condlog(0, "%s: path already removed", uev->kernel);
583 return ev_remove_path(pp, vecs);
/*
 * ev_remove_path: detach path pp from its map and from pathvec.  When
 * this was the map's last path, the whole map is flushed (disabling
 * queueing first when flush_on_last_del is enabled); otherwise the map
 * is reloaded without the path and the daemon state is resynced from
 * the kernel.
 */
587 ev_remove_path (struct path *pp, struct vectors * vecs)
589 struct multipath * mpp;
591 char params[PARAMS_SIZE] = {0};
594 * avoid referring to the map of an orphaned path
596 if ((mpp = pp->mpp)) {
598 * transform the mp->pg vector of vectors of paths
599 * into a mp->params string to feed the device-mapper
601 if (update_mpp_paths(mpp, vecs->pathvec)) {
602 condlog(0, "%s: failed to update paths",
606 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
607 vector_del_slot(mpp->paths, i);
610 * remove the map IFF removing the last path
612 if (VECTOR_SIZE(mpp->paths) == 0) {
613 char alias[WWID_SIZE];
616 * flush_map will fail if the device is open
618 strncpy(alias, mpp->alias, WWID_SIZE);
/* Stop queueing first so the flush is not blocked by queued I/O. */
619 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
620 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
622 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
623 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
624 dm_queue_if_no_path(mpp->alias, 0);
626 if (!flush_map(mpp, vecs)) {
627 condlog(2, "%s: removed map after"
628 " removing all paths",
634 * Not an error, continue
638 if (setup_map(mpp, params, PARAMS_SIZE)) {
639 condlog(0, "%s: failed to setup map for"
640 " removal of path %s", mpp->alias, pp->dev);
646 mpp->action = ACT_RELOAD;
647 if (domap(mpp, params) <= 0) {
648 condlog(0, "%s: failed in domap for "
649 "removal of path %s",
650 mpp->alias, pp->dev);
654 * update our state from kernel
656 if (setup_multipath(vecs, mpp)) {
661 condlog(2, "%s [%s]: path removed from map %s",
662 pp->dev, pp->dev_t, mpp->alias);
667 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
668 vector_del_slot(vecs->pathvec, i);
675 remove_map_and_stop_waiter(mpp, vecs, 1);
/*
 * uev_update_path: handle a "change" uevent on a path.  Currently reacts
 * to a changed read-only attribute (DISK_RO) by reloading the owning
 * map so the kernel picks up the new write-protect state.
 */
680 uev_update_path (struct uevent *uev, struct vectors * vecs)
684 ro = uevent_get_disk_ro(uev);
689 condlog(2, "%s: update path write_protect to '%d' (uevent)",
691 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
693 condlog(0, "%s: spurious uevent, path not found",
698 retval = reload_map(vecs, pp->mpp, 0);
700 condlog(2, "%s: map %s reloaded (retval %d)",
701 uev->kernel, pp->mpp->alias, retval);
/*
 * map_discovery: enumerate existing multipath maps from device-mapper
 * into vecs->mpvec and initialize daemon state for each of them.
 */
710 map_discovery (struct vectors * vecs)
712 struct multipath * mpp;
715 if (dm_get_maps(vecs->mpvec))
718 vector_foreach_slot (vecs->mpvec, mpp, i)
719 if (setup_multipath(vecs, mpp))
/*
 * uxsock_trigger: callback invoked for each CLI command received on the
 * unix socket.  Runs parse_cmd() under the vecs lock (with cancellation
 * cleanup installed) and normalizes the reply: "fail\n" on error with
 * no reply text, "ok\n" on success with empty reply; r < 0 keeps the
 * handler-provided reply untouched.
 */
726 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
728 struct vectors * vecs;
733 vecs = (struct vectors *)trigger_data;
735 pthread_cleanup_push(cleanup_lock, &vecs->lock);
737 pthread_testcancel();
739 r = parse_cmd(str, reply, len, vecs);
742 *reply = STRDUP("fail\n");
743 *len = strlen(*reply) + 1;
746 else if (!r && *len == 0) {
747 *reply = STRDUP("ok\n");
748 *len = strlen(*reply) + 1;
751 /* else if (r < 0) leave *reply alone */
753 lock_cleanup_pop(vecs->lock);
/*
 * uev_discard: filter uevents by devpath.  Keeps only whole block
 * devices ("/block/<name>"); events for partitions or non-block
 * devices are discarded.
 */
758 uev_discard(char * devpath)
764 * keep only block devices, discard partitions
766 tmp = strstr(devpath, "/block/");
768 condlog(4, "no /block/ in '%s'", devpath);
/* Two-component match ("/block/x/y") indicates a partition: discard. */
771 if (sscanf(tmp, "/block/%10s", a) != 1 ||
772 sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
773 condlog(4, "discard event on %s", devpath);
/*
 * uev_trigger: central uevent dispatcher.  Under the vecs lock, routes
 * dm-* events (change -> uev_add_map, remove -> uev_remove_map) and
 * plain path events (add/remove/change) to their handlers, after
 * applying the devnode blacklist.  dm "add" events are deliberately
 * ignored because the tables are not yet fully initialised.
 */
780 uev_trigger (struct uevent * uev, void * trigger_data)
783 struct vectors * vecs;
785 vecs = (struct vectors *)trigger_data;
787 if (uev_discard(uev->devpath))
790 pthread_cleanup_push(cleanup_lock, &vecs->lock);
792 pthread_testcancel();
796 * Add events are ignored here as the tables
797 * are not fully initialised then.
799 if (!strncmp(uev->kernel, "dm-", 3)) {
800 if (!strncmp(uev->action, "change", 6)) {
801 r = uev_add_map(uev, vecs);
804 if (!strncmp(uev->action, "remove", 6)) {
805 r = uev_remove_map(uev, vecs);
812 * path add/remove event
814 if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
818 if (!strncmp(uev->action, "add", 3)) {
819 r = uev_add_path(uev, vecs);
822 if (!strncmp(uev->action, "remove", 6)) {
823 r = uev_remove_path(uev, vecs);
826 if (!strncmp(uev->action, "change", 6)) {
827 r = uev_update_path(uev, vecs);
832 lock_cleanup_pop(vecs->lock);
/*
 * ueventloop: thread entry that blocks in the udev uevent listener.
 * NOTE(review): excerpt is truncated; lines 848-849 belong to the
 * separate uevent dispatcher thread entry (uevqloop) whose header is
 * not visible here.
 */
837 ueventloop (void * ap)
839 if (uevent_listen(udev))
840 condlog(0, "error starting uevent listener");
848 if (uevent_dispatch(&uev_trigger, ap))
849 condlog(0, "error starting uevent dispatcher");
/*
 * uxlsnrloop: thread entry for the CLI listener.  Registers every CLI
 * command handler (list/add/del/switch/suspend/resume/resize/reload/
 * queueing/persistent-reservation/...) and then blocks in
 * uxsock_listen(), which invokes uxsock_trigger for each command.
 */
854 uxlsnrloop (void * ap)
859 set_handler_callback(LIST+PATHS, cli_list_paths);
860 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
861 set_handler_callback(LIST+MAPS, cli_list_maps);
862 set_handler_callback(LIST+STATUS, cli_list_status);
863 set_handler_callback(LIST+DAEMON, cli_list_daemon);
864 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
865 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
866 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
867 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
868 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
869 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
870 set_handler_callback(LIST+CONFIG, cli_list_config);
871 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
872 set_handler_callback(LIST+DEVICES, cli_list_devices);
873 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
874 set_handler_callback(ADD+PATH, cli_add_path);
875 set_handler_callback(DEL+PATH, cli_del_path);
876 set_handler_callback(ADD+MAP, cli_add_map);
877 set_handler_callback(DEL+MAP, cli_del_map);
878 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
879 set_handler_callback(RECONFIGURE, cli_reconfigure);
880 set_handler_callback(SUSPEND+MAP, cli_suspend);
881 set_handler_callback(RESUME+MAP, cli_resume);
882 set_handler_callback(RESIZE+MAP, cli_resize);
883 set_handler_callback(RELOAD+MAP, cli_reload);
884 set_handler_callback(RESET+MAP, cli_reassign);
885 set_handler_callback(REINSTATE+PATH, cli_reinstate);
886 set_handler_callback(FAIL+PATH, cli_fail);
887 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
888 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
889 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
890 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
891 set_handler_callback(QUIT, cli_quit);
892 set_handler_callback(SHUTDOWN, cli_shutdown);
893 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
894 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
895 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
896 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
897 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
900 uxsock_listen(&uxsock_trigger, ap);
/*
 * NOTE(review): fragment of a function whose header is not visible in
 * this excerpt — it branches on the daemon lifecycle state (presumably
 * the shutdown/exit handler; confirm against the full source).
 */
914 switch (running_state) {
919 case DAEMON_CONFIGURE:
923 case DAEMON_SHUTDOWN:
/*
 * fail_path: proactively mark path pp failed in the kernel map and
 * update the map's queueing bookkeeping for the removed active path.
 */
930 fail_path (struct path * pp, int del_active)
935 condlog(2, "checker failed path %s in map %s",
936 pp->dev_t, pp->mpp->alias);
938 dm_fail_path(pp->mpp->alias, pp->dev_t);
940 update_queue_mode_del_path(pp->mpp);
944 * caller must have locked the path list before calling that function
/*
 * reinstate_path: tell the kernel to reinstate pp in its map; on
 * success, update queueing bookkeeping for the new active path.
 */
947 reinstate_path (struct path * pp, int add_active)
952 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
953 condlog(0, "%s: reinstate failed", pp->dev_t);
955 condlog(2, "%s: reinstated", pp->dev_t);
957 update_queue_mode_add_path(pp->mpp);
/*
 * enable_group: re-enable the (1-based) path group containing pp if the
 * kernel currently has it disabled.  Safe no-op when pgindex is unset
 * (e.g. path just added via uev_add_path) — the next map reload will
 * enable all groups anyway.
 */
962 enable_group(struct path * pp)
964 struct pathgroup * pgp;
967 * if path is added through uev_add_path, pgindex can be unset.
968 * next update_strings() will set it, upon map reload event.
970 * we can safely return here, because upon map reload, all
971 * PG will be enabled.
973 if (!pp->mpp->pg || !pp->pgindex)
976 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
978 if (pgp->status == PGSTATE_DISABLED) {
979 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
980 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * mpvec_garbage_collector: drop bookkeeping for maps that no longer
 * exist in device-mapper (removed externally), stopping their waiter
 * threads.
 */
985 mpvec_garbage_collector (struct vectors * vecs)
987 struct multipath * mpp;
993 vector_foreach_slot (vecs->mpvec, mpp, i) {
994 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
995 condlog(2, "%s: remove dead map", mpp->alias);
996 remove_map_and_stop_waiter(mpp, vecs, 1);
1002 /* This is called after a path has started working again. It the multipath
1003 * device for this path uses the followover failback type, and this is the
1004 * best pathgroup, and this is the first path in the pathgroup to come back
1005 * up, then switch to this pathgroup */
1007 followover_should_failback(struct path * pp)
1009 struct pathgroup * pgp;
/* Only applies to followover maps whose best group contains pp. */
1013 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1014 !pp->mpp->pg || !pp->pgindex ||
1015 pp->pgindex != pp->mpp->bestpg)
/* Any other usable path in the group means pp was not the first back up. */
1018 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1019 vector_foreach_slot(pgp->paths, pp1, i) {
1022 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * defered_failback_tick: per-second countdown of each map's deferred
 * failback timer; when it reaches zero and a better path group exists,
 * perform the switch.
 */
1029 defered_failback_tick (vector mpvec)
1031 struct multipath * mpp;
1034 vector_foreach_slot (mpvec, mpp, i) {
1036 * defered failback getting sooner
1038 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1039 mpp->failback_tick--;
1041 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1042 switch_pathgroup(mpp);
/*
 * retry_count_tick: per-second countdown of each queueing map's
 * no_path_retry budget; when it expires, stop queueing I/O on the map.
 */
1048 retry_count_tick(vector mpvec)
1050 struct multipath *mpp;
1053 vector_foreach_slot (mpvec, mpp, i) {
1054 if (mpp->retry_tick) {
1055 mpp->stat_total_queueing_time++;
1056 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1057 if(--mpp->retry_tick == 0) {
1058 dm_queue_if_no_path(mpp->alias, 0);
1059 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * update_prio: refresh path priorities and report whether any changed.
 * With refresh_all set, every path of pp's map is refreshed; otherwise
 * only pp itself.  NOTE(review): excerpt truncated; return paths not
 * fully visible.
 */
1065 int update_prio(struct path *pp, int refresh_all)
1069 struct pathgroup * pgp;
1070 int i, j, changed = 0;
1073 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1074 vector_foreach_slot (pgp->paths, pp1, j) {
1075 oldpriority = pp1->priority;
1076 pathinfo(pp1, conf->hwtable, DI_PRIO);
1077 if (pp1->priority != oldpriority)
1083 oldpriority = pp->priority;
1084 pathinfo(pp, conf->hwtable, DI_PRIO);
1086 if (pp->priority == oldpriority)
/*
 * update_path_groups: reload the map's dm table (regrouping paths),
 * resync daemon state from the kernel, and push checker path states
 * back into the kernel via sync_map_state().
 */
1091 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1093 if (reload_map(vecs, mpp, refresh))
1097 if (setup_multipath(vecs, mpp) != 0)
1099 sync_map_state(mpp);
/*
 * check_path: one checker iteration for path pp.  Honors the per-path
 * tick countdown, determines the new path state (path_offline then the
 * configured checker), synchronizes daemon/kernel state on changes
 * (fail_path/reinstate_path, group enabling, persistent-reservation
 * re-registration), adapts the polling interval (reset on change,
 * exponential backoff up to max_checkint while stable), and finally
 * drives priority-based or deferred/followover path group failback.
 */
1105 check_path (struct vectors * vecs, struct path * pp)
1108 int new_path_up = 0;
1109 int chkr_new_path_up = 0;
1110 int oldchkrstate = pp->chkrstate;
1115 if (pp->tick && --pp->tick)
1116 return; /* don't check this path yet */
1119 * provision a next check soonest,
1120 * in case we exit abnormaly from here
1122 pp->tick = conf->checkint;
1124 newstate = path_offline(pp);
1125 if (newstate == PATH_UP)
1126 newstate = get_state(pp, 1);
1128 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1129 condlog(2, "%s: unusable path", pp->dev);
1130 pathinfo(pp, conf->hwtable, 0);
1134 * Async IO in flight. Keep the previous path state
1135 * and reschedule as soon as possible
1137 if (newstate == PATH_PENDING) {
1142 * Synchronize with kernel state
1144 if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1145 condlog(1, "%s: Could not synchronize with kernel state",
1147 pp->dmstate = PSTATE_UNDEF;
1149 pp->chkrstate = newstate;
1150 if (newstate != pp->state) {
1151 int oldstate = pp->state;
1152 pp->state = newstate;
1153 LOG_MSG(1, checker_message(&pp->checker));
1156 * upon state change, reset the checkint
1157 * to the shortest delay
1159 pp->checkint = conf->checkint;
1161 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1163 * proactively fail path in the DM
1165 if (oldstate == PATH_UP ||
1166 oldstate == PATH_GHOST)
1172 * cancel scheduled failback
1174 pp->mpp->failback_tick = 0;
1176 pp->mpp->stat_path_failures++;
/* Path came (back) up: verify PR registration if the map uses one. */
1180 if(newstate == PATH_UP || newstate == PATH_GHOST){
1181 if ( pp->mpp && pp->mpp->prflag ){
1183 * Check Persistent Reservation.
1185 condlog(2, "%s: checking persistent reservation "
1186 "registration", pp->dev);
1187 mpath_pr_event_handle(pp);
1192 * reinstate this path
1194 if (oldstate != PATH_UP &&
1195 oldstate != PATH_GHOST)
1196 reinstate_path(pp, 1);
1198 reinstate_path(pp, 0);
1202 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1203 chkr_new_path_up = 1;
1206 * if at least one path is up in a group, and
1207 * the group is disabled, re-enable it
1209 if (newstate == PATH_UP)
/* Stable up/ghost path: clear stale kernel failures, back off polling. */
1212 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1213 if (pp->dmstate == PSTATE_FAILED ||
1214 pp->dmstate == PSTATE_UNDEF) {
1215 /* Clear IO errors */
1216 reinstate_path(pp, 0);
1218 LOG_MSG(4, checker_message(&pp->checker));
1219 if (pp->checkint != conf->max_checkint) {
1221 * double the next check delay.
1222 * max at conf->max_checkint
1224 if (pp->checkint < (conf->max_checkint / 2))
1225 pp->checkint = 2 * pp->checkint;
1227 pp->checkint = conf->max_checkint;
1229 condlog(4, "%s: delay next check %is",
1230 pp->dev_t, pp->checkint);
1232 pp->tick = pp->checkint;
1235 else if (newstate == PATH_DOWN) {
1236 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1237 LOG_MSG(3, checker_message(&pp->checker));
1239 LOG_MSG(2, checker_message(&pp->checker));
1242 pp->state = newstate;
1245 * path prio refreshing
1247 condlog(4, "path prio refresh");
/* Priority change on a group_by_prio map with immediate failback:
 * regroup immediately; otherwise schedule or perform group switch. */
1249 if (update_prio(pp, new_path_up) &&
1250 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1251 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1252 update_path_groups(pp->mpp, vecs, !new_path_up);
1253 else if (need_switch_pathgroup(pp->mpp, 0)) {
1254 if (pp->mpp->pgfailback > 0 &&
1255 (new_path_up || pp->mpp->failback_tick <= 0))
1256 pp->mpp->failback_tick =
1257 pp->mpp->pgfailback + 1;
1258 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1259 (chkr_new_path_up && followover_should_failback(pp)))
1260 switch_pathgroup(pp->mpp);
/*
 * checkerloop: thread entry for the periodic path checker.  Locks the
 * daemon memory, seeds each path's check interval, then loops (under
 * the vecs lock with cancellation cleanup) running check_path() on
 * every path, ticking deferred failback and retry counters, and
 * garbage-collecting dead maps.
 */
1265 checkerloop (void *ap)
1267 struct vectors *vecs;
1272 mlockall(MCL_CURRENT | MCL_FUTURE);
1273 vecs = (struct vectors *)ap;
1274 condlog(2, "path checkers start up");
1277 * init the path check interval
1279 vector_foreach_slot (vecs->pathvec, pp, i) {
1280 pp->checkint = conf->checkint;
1284 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1286 pthread_testcancel();
1289 if (vecs->pathvec) {
1290 vector_foreach_slot (vecs->pathvec, pp, i) {
1291 check_path(vecs, pp);
1295 defered_failback_tick(vecs->mpvec);
1296 retry_count_tick(vecs->mpvec);
1301 condlog(4, "map garbage collection");
1302 mpvec_garbage_collector(vecs);
1306 lock_cleanup_pop(vecs->lock);
/*
 * configure: initial (or re-)configuration pass.  Discovers paths from
 * sysfs and maps from device-mapper, filters blacklisted paths,
 * coalesces paths into a fresh map vector, removes obsolete maps,
 * installs the new map vector, and initializes each map (optionally
 * starting its dm-event waiter thread).
 */
1313 configure (struct vectors * vecs, int start_waiters)
1315 struct multipath * mpp;
1320 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1323 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1326 if (!(mpvec = vector_alloc()))
1330 * probe for current path (from sysfs) and map (from dm) sets
1332 path_discovery(vecs->pathvec, conf, DI_ALL);
1334 vector_foreach_slot (vecs->pathvec, pp, i){
1335 if (filter_path(conf, pp) > 0){
1336 vector_del_slot(vecs->pathvec, i);
1341 pp->checkint = conf->checkint;
1343 if (map_discovery(vecs))
1347 * create new set of maps & push changed ones into dm
1349 if (coalesce_paths(vecs, mpvec, NULL, 1))
1353 * may need to remove some maps which are no longer relevant
1354 * e.g., due to blacklist changes in conf file
1356 if (coalesce_maps(vecs, mpvec))
1361 sync_maps_state(mpvec);
1362 vector_foreach_slot(mpvec, mpp, i){
1363 remember_wwid(mpp->wwid);
1368 * purge dm of old maps
1373 * save new set of maps formed by considering current path state
1375 vector_free(vecs->mpvec);
1376 vecs->mpvec = mpvec;
1379 * start dm event waiter threads for these new maps
1381 vector_foreach_slot(vecs->mpvec, mpp, i) {
1382 if (setup_multipath(vecs, mpp))
1385 if (start_waiter_thread(mpp, vecs))
/*
 * reconfigure: tear down all maps and paths built under the old
 * configuration, reload multipath.conf (preserving the runtime
 * verbosity), and rebuild state.  NOTE(review): excerpt truncated;
 * the follow-up configure() call is not visible here.
 */
1392 reconfigure (struct vectors * vecs)
1394 struct config * old = conf;
1398 * free old map and path vectors ... they use old conf state
1400 if (VECTOR_SIZE(vecs->mpvec))
1401 remove_maps_and_stop_waiters(vecs)
1403 if (VECTOR_SIZE(vecs->pathvec))
1404 free_pathvec(vecs->pathvec, FREE_PATHS);
1406 vecs->pathvec = NULL;
1409 if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1410 conf->verbosity = old->verbosity;
/*
 * init_vecs: allocate and initialize the global vectors container and
 * its recursive-depth mutex.  Returns NULL on allocation failure.
 */
1420 static struct vectors *
1423 struct vectors * vecs;
1425 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1431 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1433 if (!vecs->lock.mutex)
1436 pthread_mutex_init(vecs->lock.mutex, NULL);
1437 vecs->lock.depth = 0;
1443 condlog(0, "failed to init paths");
/*
 * signal_set: install func as the handler for signo via sigaction(),
 * returning the previously installed handler (signal(2)-style wrapper).
 */
1448 signal_set(int signo, void (*func) (int))
1451 struct sigaction sig;
1452 struct sigaction osig;
1454 sig.sa_handler = func;
1455 sigemptyset(&sig.sa_mask);
1458 r = sigaction(signo, &sig, &osig);
1463 return (osig.sa_handler);
/*
 * handle_signals: act on flags raised by async signal handlers —
 * reconfigure (SIGHUP) under the global vecs lock when the daemon is
 * running, and reset the log subsystem (SIGUSR1) under logq_lock.
 */
1467 handle_signals(void)
1469 if (reconfig_sig && running_state == DAEMON_RUNNING) {
1470 condlog(2, "reconfigure (signal)");
1471 pthread_cleanup_push(cleanup_lock,
1474 pthread_testcancel();
1476 lock_cleanup_pop(gvecs->lock);
1478 if (log_reset_sig) {
1479 condlog(2, "reset log (signal)");
1480 pthread_mutex_lock(&logq_lock);
1481 log_reset("multipathd");
1482 pthread_mutex_unlock(&logq_lock);
/*
 * NOTE(review): body fragment of the signal-setup routine (its header
 * is not visible in this excerpt).  Blocks SIGHUP/SIGUSR1 for normal
 * threads, installs handlers for HUP/USR1/INT/TERM and ignores SIGPIPE.
 */
1512 sigaddset(&set, SIGHUP);
1513 sigaddset(&set, SIGUSR1);
1514 pthread_sigmask(SIG_BLOCK, &set, NULL);
1516 signal_set(SIGHUP, sighup);
1517 signal_set(SIGUSR1, sigusr1);
1518 signal_set(SIGINT, sigend);
1519 signal_set(SIGTERM, sigend);
1520 signal(SIGPIPE, SIG_IGN);
/*
 * NOTE(review): fragment of the real-time scheduler setup (function
 * header not visible).  Requests SCHED_RR at maximum priority so the
 * checker keeps running under load; failure is only a warning.
 */
1527 static struct sched_param sched_param = {
1528 .sched_priority = 99
1531 res = sched_setscheduler (0, SCHED_RR, &sched_param);
1534 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * NOTE(review): fragment of the OOM-score adjustment routine (header
 * not visible).  Writes the minimum OOM score to
 * /proc/self/oom_score_adj, falling back to the legacy
 * /proc/self/oom_adj interface when the file does not exist.
 */
1541 #ifdef OOM_SCORE_ADJ_MIN
1543 char *file = "/proc/self/oom_score_adj";
1544 int score = OOM_SCORE_ADJ_MIN;
1547 char *file = "/proc/self/oom_adj";
1548 int score = OOM_ADJUST_MIN;
1554 if (stat(file, &st) == 0){
1555 fp = fopen(file, "w");
1557 condlog(0, "couldn't fopen %s : %s", file,
1561 fprintf(fp, "%i", score);
1565 if (errno != ENOENT) {
1566 condlog(0, "couldn't stat %s : %s", file,
1570 #ifdef OOM_ADJUST_MIN
1571 file = "/proc/self/oom_adj";
1572 score = OOM_ADJUST_MIN;
1577 condlog(0, "couldn't adjust oom score")
/*
 * child: main body of the daemon process.  Initializes logging, loads
 * the configuration, checkers and prioritizers, raises the fd limit,
 * allocates the global vectors, spawns the worker threads (uevent
 * listener, CLI listener, path checker, uevent dispatcher), performs
 * the initial configure(), writes the pidfile, then sleeps on exit_sem
 * until shutdown — at which point it disables queueing (if configured),
 * tears down maps/paths, cancels and joins the threads, and frees all
 * remaining state.
 */
1581 child (void * param)
1583 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1584 pthread_attr_t log_attr, misc_attr, uevent_attr;
1585 struct vectors * vecs;
1586 struct multipath * mpp;
1590 mlockall(MCL_CURRENT | MCL_FUTURE);
1591 sem_init(&exit_sem, 0, 0);
1596 setup_thread_attr(&misc_attr, 64 * 1024, 1);
1597 setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1598 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1601 setup_thread_attr(&log_attr, 64 * 1024, 0);
1602 log_thread_start(&log_attr);
1603 pthread_attr_destroy(&log_attr);
1606 running_state = DAEMON_START;
1608 condlog(2, "--------start up--------");
1609 condlog(2, "read " DEFAULT_CONFIGFILE);
1611 if (load_config(DEFAULT_CONFIGFILE, udev))
1614 if (init_checkers()) {
1615 condlog(0, "failed to initialize checkers");
1619 condlog(0, "failed to initialize prioritizers");
1623 setlogmask(LOG_UPTO(conf->verbosity + 3));
/* Raise RLIMIT_NOFILE to the configured max_fds if currently lower. */
1625 if (conf->max_fds) {
1626 struct rlimit fd_limit;
1628 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1629 condlog(0, "can't get open fds limit: %s",
1631 fd_limit.rlim_cur = 0;
1632 fd_limit.rlim_max = 0;
1634 if (fd_limit.rlim_cur < conf->max_fds) {
1635 fd_limit.rlim_cur = conf->max_fds;
1636 if (fd_limit.rlim_max < conf->max_fds)
1637 fd_limit.rlim_max = conf->max_fds;
1638 if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1639 condlog(0, "can't set open fds limit to "
1641 fd_limit.rlim_cur, fd_limit.rlim_max,
1644 condlog(3, "set open fds limit to %lu/%lu",
1645 fd_limit.rlim_cur, fd_limit.rlim_max);
1651 vecs = gvecs = init_vecs();
1659 udev_set_sync_support(0);
1661 * Start uevent listener early to catch events
1663 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1664 condlog(0, "failed to create uevent thread: %d", rc);
1667 pthread_attr_destroy(&uevent_attr);
1668 if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1669 condlog(0, "failed to create cli listener: %d", rc);
1673 * fetch and configure both paths and multipaths
1675 running_state = DAEMON_CONFIGURE;
1678 if (configure(vecs, 1)) {
1680 condlog(0, "failure during configuration");
1688 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1689 condlog(0,"failed to create checker loop thread: %d", rc);
1692 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1693 condlog(0, "failed to create uevent dispatcher: %d", rc);
1696 pthread_attr_destroy(&misc_attr);
1698 /* Startup complete, create logfile */
1699 pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1700 /* Ignore errors, we can live without */
1702 running_state = DAEMON_RUNNING;
/* Block here until a signal handler posts exit_sem. */
1707 while(sem_wait(&exit_sem) != 0); /* Do nothing */
1708 running_state = DAEMON_SHUTDOWN;
1710 if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1711 vector_foreach_slot(vecs->mpvec, mpp, i)
1712 dm_queue_if_no_path(mpp->alias, 0);
1713 remove_maps_and_stop_waiters(vecs);
1716 pthread_cancel(check_thr);
1717 pthread_cancel(uevent_thr);
1718 pthread_cancel(uxlsnr_thr);
1719 pthread_cancel(uevq_thr);
1722 free_pathvec(vecs->pathvec, FREE_PATHS);
1723 vecs->pathvec = NULL;
1725 /* Now all the waitevent threads will start rushing in. */
1726 while (vecs->lock.depth > 0) {
1727 sleep (1); /* This is weak. */
1728 condlog(3, "Have %d wait event checkers threads to de-alloc,"
1729 " waiting...", vecs->lock.depth);
1731 pthread_mutex_destroy(vecs->lock.mutex);
1732 FREE(vecs->lock.mutex);
1733 vecs->lock.depth = 0;
1734 vecs->lock.mutex = NULL;
1744 /* We're done here */
1746 condlog(3, "unlink pidfile");
1747 unlink(DEFAULT_PIDFILE);
1750 condlog(2, "--------shut down-------");
1756 * Freeing config must be done after condlog() and dm_lib_exit(),
1757 * because logging functions like dlog() and dm_write_log()
1758 * reference the config.
1765 dbg_free_final(NULL);
/*
 * NOTE(review): fragment of the daemonize routine (header not visible).
 * Double-forks to detach from the controlling terminal, chdirs to /,
 * and redirects stdin/stdout/stderr to /dev/null, recording the
 * resulting daemon pid.
 */
1777 if( (pid = fork()) < 0){
1778 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1786 if ( (pid = fork()) < 0)
1787 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1792 fprintf(stderr, "cannot chdir to '/', continuing\n");
1794 dev_null_fd = open("/dev/null", O_RDWR);
1795 if (dev_null_fd < 0){
1796 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1801 close(STDIN_FILENO);
1802 if (dup(dev_null_fd) < 0) {
1803 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1807 close(STDOUT_FILENO);
1808 if (dup(dev_null_fd) < 0) {
1809 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1813 close(STDERR_FILENO);
1814 if (dup(dev_null_fd) < 0) {
1815 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1820 daemon_pid = getpid();
/*
 * main: entry point.  Requires root, sets a safe umask and working
 * directory, allocates the initial config, parses command-line options
 * (-d foreground, -s, -v verbosity, -k interactive client), forwards
 * any trailing arguments as a single CLI command string, and finally
 * runs the daemon body via child().
 */
1825 main (int argc, char *argv[])
1827 extern char *optarg;
1833 running_state = DAEMON_INIT;
1836 if (getuid() != 0) {
1837 fprintf(stderr, "need to be root\n");
1841 /* make sure we don't lock any path */
1843 fprintf(stderr, "can't chdir to root directory : %s\n",
1845 umask(umask(077) | 022);
1847 conf = alloc_config();
1852 while ((arg = getopt(argc, argv, ":dsv:k::")) != EOF ) {
1856 //debug=1; /* ### comment me out ### */
1859 if (sizeof(optarg) > sizeof(char *) ||
1860 !isdigit(optarg[0]))
1863 conf->verbosity = atoi(optarg);
/* Remaining argv words are joined (quoting spaced args) into one
 * command line for the interactive CLI client. */
1875 if (optind < argc) {
1880 while (optind < argc) {
1881 if (strchr(argv[optind], ' '))
1882 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1884 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1887 c += snprintf(c, s + CMDSIZE - c, "\n");
1905 return (child(NULL));
/*
 * mpath_pr_event_handler_fn: worker thread that verifies persistent
 * reservation registration for a newly usable path.  Reads the device's
 * registered keys (PR IN, READ KEYS), checks whether the map's
 * reservation key is among them, and if so (re-)registers this path
 * with PR OUT REGISTER-AND-IGNORE using the same key.
 */
1908 void * mpath_pr_event_handler_fn (void * pathp )
1910 struct multipath * mpp;
1911 int i,j, ret, isFound;
1912 struct path * pp = (struct path *)pathp;
1913 unsigned char *keyp;
1915 struct prout_param_descriptor *param;
1916 struct prin_resp *resp;
1920 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1922 condlog(0,"%s Alloc failed for prin response", pp->dev);
1926 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1927 if (ret != MPATH_PR_SUCCESS )
1929 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1933 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
1934 resp->prin_descriptor.prin_readkeys.additional_length );
1936 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1938 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1939 ret = MPATH_PR_SUCCESS;
/* Fold the map's 8-byte reservation key into prkey for comparison/logging. */
1943 keyp = (unsigned char *)mpp->reservation_key;
1944 for (j = 0; j < 8; ++j) {
1950 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
/* Scan the returned key list (8 bytes per key) for our key. */
1953 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1955 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
1956 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1957 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1959 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1966 condlog(0, "%s: Either device not registered or ", pp->dev);
1967 condlog(0, "host is not authorised for registration. Skip path");
1968 ret = MPATH_PR_OTHER;
1972 param= malloc(sizeof(struct prout_param_descriptor));
1973 memset(param, 0 , sizeof(struct prout_param_descriptor));
1975 for (j = 7; j >= 0; --j) {
1976 param->sa_key[j] = (prkey & 0xff);
1979 param->num_transportid = 0;
1981 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1983 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1984 if (ret != MPATH_PR_SUCCESS )
1986 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
/*
 * mpath_pr_event_handle: spawn mpath_pr_event_handler_fn for path pp
 * and wait for it to finish.  No-op when the map has no reservation
 * key configured.  NOTE(review): the function continues past the end
 * of this excerpt.
 */
1996 int mpath_pr_event_handle(struct path *pp)
2000 pthread_attr_t attr;
2001 struct multipath * mpp;
2005 if (!mpp->reservation_key)
2008 pthread_attr_init(&attr);
2009 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2011 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2013 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2016 pthread_attr_destroy(&attr);
2017 rc = pthread_join(thread, NULL);