2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <linux/oom.h>
20 #include <systemd/sd-daemon.h>
22 #include <semaphore.h>
29 #include "time-util.h"
37 static int use_watchdog;
51 #include "blacklist.h"
52 #include "structs_vec.h"
54 #include "devmapper.h"
57 #include "discovery.h"
61 #include "switchgroup.h"
63 #include "configure.h"
66 #include "pgpolicies.h"
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
74 #include "prioritizers/alua_rtpg.h"
81 #include "cli_handlers.h"
85 #include "io_err_stat.h"
88 #include "../third-party/valgrind/drd.h"
90 #define FILE_NAME_SIZE 256
93 #define LOG_MSG(lvl, verb, pp) \
97 condlog(lvl, "%s: %s - path offline", \
98 pp->mpp->alias, pp->dev); \
101 checker_message(&pp->checker); \
104 condlog(lvl, "%s: %s - %s checker%s", \
107 checker_name(&pp->checker), \
113 struct mpath_event_param
116 struct multipath *mpp;
122 int bindings_read_only;
124 #ifdef NO_DMEVENTS_POLL
125 int poll_dmevents = 0;
127 int poll_dmevents = 1;
129 enum daemon_status running_state = DAEMON_INIT;
131 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
132 pthread_cond_t config_cond;
135 * global copy of vecs for use in sig handlers
137 struct vectors * gvecs;
141 struct config *multipath_conf;
143 /* Local variables */
144 static volatile sig_atomic_t exit_sig;
145 static volatile sig_atomic_t reconfig_sig;
146 static volatile sig_atomic_t log_reset_sig;
151 switch (running_state) {
156 case DAEMON_CONFIGURE:
162 case DAEMON_SHUTDOWN:
169 * I love you too, systemd ...
/* Map the daemon's running_state to a systemd "STATUS=" string for
 * sd_notify(). (Fragmented extract: return type, some case labels and the
 * closing brace are missing from this view; code left byte-identical.) */
172 sd_notify_status(void)
174 switch (running_state) {
176 return "STATUS=init";
178 return "STATUS=startup";
179 case DAEMON_CONFIGURE:
180 return "STATUS=configure";
184 case DAEMON_SHUTDOWN:
185 return "STATUS=shutdown";
/* Notify systemd of a state change, but suppress the frequent
 * IDLE<->RUNNING transitions to avoid dbus overhead (see comment below).
 * (Fragmented extract; code left byte-identical.) */
191 static void do_sd_notify(enum daemon_status old_state)
194 * Checkerloop switches back and forth between idle and running state.
195 * No need to tell systemd each time.
196 * These notifications cause a lot of overhead on dbus.
198 if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
199 (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
201 sd_notify(0, sd_notify_status());
/* pthread cleanup handler: release config_lock if the holding thread is
 * cancelled while the lock is held. `arg` is unused. */
205 static void config_cleanup(void *arg)
207 pthread_mutex_unlock(&config_lock);
/* Set running_state to `state` (unless already there, or shutting down),
 * wake all waiters on config_cond, and tell systemd. Caller must hold
 * config_lock. (Fragmented extract; code left byte-identical.) */
210 static void __post_config_state(enum daemon_status state)
212 if (state != running_state && running_state != DAEMON_SHUTDOWN) {
213 enum daemon_status old_state = running_state;
215 running_state = state;
216 pthread_cond_broadcast(&config_cond);
218 do_sd_notify(old_state);
/* Locked wrapper around __post_config_state(); the cleanup push/pop pair
 * guarantees config_lock is released even on thread cancellation. */
223 void post_config_state(enum daemon_status state)
225 pthread_mutex_lock(&config_lock);
226 pthread_cleanup_push(config_cleanup, NULL);
227 __post_config_state(state);
228 pthread_cleanup_pop(1);
/* Request a daemon state transition: under config_lock, wait (with a
 * CLOCK_MONOTONIC pthread_cond_timedwait) until the daemon is IDLE or
 * shutting down, then set running_state and broadcast.
 * (Fragmented extract: declarations, the timedwait loop body and the
 * return paths are missing from this view; code left byte-identical.) */
231 int set_config_state(enum daemon_status state)
235 pthread_cleanup_push(config_cleanup, NULL);
236 pthread_mutex_lock(&config_lock);
237 if (running_state != state) {
238 enum daemon_status old_state = running_state;
240 if (running_state == DAEMON_SHUTDOWN)
242 else if (running_state != DAEMON_IDLE) {
245 clock_gettime(CLOCK_MONOTONIC, &ts);
247 rc = pthread_cond_timedwait(&config_cond,
251 running_state = state;
252 pthread_cond_broadcast(&config_cond);
254 do_sd_notify(old_state);
258 pthread_cleanup_pop(1);
/* RCU read-side accessor for the global multipath_conf pointer; pair each
 * call with put_multipath_config(). */
262 struct config *get_multipath_config(void)
265 return rcu_dereference(multipath_conf);
/* Release side of get_multipath_config(); body not visible in this
 * extract (presumably ends the RCU read-side critical section — confirm
 * against the full source). */
268 void put_multipath_config(void *arg)
/* Decide whether mpp should fail back to a better path group. When
 * `refresh` is set, re-read per-path priorities first. Stores the best
 * group in mpp->bestpg and compares it with mpp->nextpg; manual-failback
 * maps never trigger a switch. (Fragmented extract: return type, loop
 * bodies and returns are missing; code left byte-identical.) */
274 need_switch_pathgroup (struct multipath * mpp, int refresh)
276 struct pathgroup * pgp;
286 * Refresh path priority values
289 vector_foreach_slot (mpp->pg, pgp, i) {
290 vector_foreach_slot (pgp->paths, pp, j) {
291 conf = get_multipath_config();
292 pthread_cleanup_push(put_multipath_config,
294 pathinfo(pp, conf, DI_PRIO);
295 pthread_cleanup_pop(1);
300 if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
303 bestpg = select_path_group(mpp);
304 if (mpp->pgfailback == -FAILBACK_MANUAL)
307 mpp->bestpg = bestpg;
308 if (mpp->bestpg != mpp->nextpg)
/* Tell the kernel (via device-mapper) to switch mpp to its best path
 * group, bumping the stat counter and logging the switch. */
315 switch_pathgroup (struct multipath * mpp)
317 mpp->stat_switchgroup++;
318 dm_switchgroup(mpp->alias, mpp->bestpg);
319 condlog(2, "%s: switch to path group #%i",
320 mpp->alias, mpp->bestpg);
/* Start event monitoring for a map: dmevent polling (watch_dmevents) or a
 * dedicated waiter thread, depending on poll_dmevents (the selecting
 * condition is not visible in this extract). */
324 wait_for_events(struct multipath *mpp, struct vectors *vecs)
327 return watch_dmevents(mpp->alias);
329 return start_waiter_thread(mpp, vecs);
/* Stop the map's waiter thread (unless dmevent polling handles removal
 * automatically) and purge the map from the vectors. */
333 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
335 /* devices are automatically removed by the dmevent polling code,
336 * so they don't need to be manually removed here */
338 stop_waiter_thread(mpp, vecs);
339 remove_map(mpp, vecs, PURGE_VEC);
/* Bulk teardown: stop every per-map waiter thread (waiter-thread mode) or
 * unwatch all dmevents (polling mode). (Fragmented extract; the map
 * removal loop itself is not visible.) */
343 remove_maps_and_stop_waiters(struct vectors *vecs)
346 struct multipath * mpp;
351 if (!poll_dmevents) {
352 vector_foreach_slot(vecs->mpvec, mpp, i)
353 stop_waiter_thread(mpp, vecs);
356 unwatch_all_dmevents();
/* Fill mpp->wwid from the device-mapper UUID, but only if it is not
 * already set (non-empty wwid short-circuits). */
362 set_multipath_wwid (struct multipath * mpp)
364 if (strlen(mpp->wwid))
367 dm_get_uuid(mpp->alias, mpp->wwid);
/* Apply the map's no_path_retry policy to the kernel queue_if_no_path
 * flag: FAIL disables queueing, QUEUE enables it, and the retry case
 * (re)enables queueing while paths exist or enters recovery mode once the
 * last path is gone. (Fragmented extract: case bodies, breaks and the
 * is_queueing assignment are missing; code left byte-identical.) */
370 static void set_no_path_retry(struct multipath *mpp)
372 char is_queueing = 0;
374 mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
375 if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
378 switch (mpp->no_path_retry) {
379 case NO_PATH_RETRY_UNDEF:
381 case NO_PATH_RETRY_FAIL:
383 dm_queue_if_no_path(mpp->alias, 0);
385 case NO_PATH_RETRY_QUEUE:
387 dm_queue_if_no_path(mpp->alias, 1);
390 if (mpp->nr_active > 0) {
392 dm_queue_if_no_path(mpp->alias, 1);
393 } else if (is_queueing && mpp->retry_tick == 0)
394 enter_recovery_mode(mpp);
/* Refresh a map's DM info and strings from the kernel; on failure the map
 * is removed (and freed) via remove_map_and_stop_waiter(), so callers
 * must not touch mpp after a non-zero return. Also re-applies the
 * no-path-retry policy and cancels deferred remove while paths remain.
 * (Fragmented extract: parameter list continuation and goto/return lines
 * are missing; code left byte-identical.) */
399 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
402 if (dm_get_info(mpp->alias, &mpp->dmi)) {
403 /* Error accessing table */
404 condlog(3, "%s: cannot access table", mpp->alias);
408 if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
409 condlog(0, "%s: failed to setup multipath", mpp->alias);
414 set_no_path_retry(mpp);
415 if (VECTOR_SIZE(mpp->paths) != 0)
416 dm_cancel_deferred_remove(mpp);
421 remove_map_and_stop_waiter(mpp, vecs);
/* Resync one named map with the kernel, then reconcile checker state with
 * DM state: any path the kernel marked failed but the checker still saw
 * up/ghost is marked PATH_DOWN and its next check is scheduled earlier.
 * Returns 1 if the map was not found or was freed by __setup_multipath().
 * (Fragmented extract; code left byte-identical.) */
425 int update_multipath (struct vectors *vecs, char *mapname, int reset)
427 struct multipath *mpp;
428 struct pathgroup *pgp;
432 mpp = find_mp_by_alias(vecs->mpvec, mapname);
435 condlog(3, "%s: multipath map not found", mapname);
439 if (__setup_multipath(vecs, mpp, reset))
440 return 1; /* mpp freed in setup_multipath */
443 * compare checkers states with DM states
445 vector_foreach_slot (mpp->pg, pgp, i) {
446 vector_foreach_slot (pgp->paths, pp, j) {
447 if (pp->dmstate != PSTATE_FAILED)
450 if (pp->state != PATH_DOWN) {
452 int oldstate = pp->state;
455 conf = get_multipath_config();
456 checkint = conf->checkint;
457 put_multipath_config(conf);
458 condlog(2, "%s: mark as failed", pp->dev);
459 mpp->stat_path_failures++;
460 pp->state = PATH_DOWN;
461 if (oldstate == PATH_UP ||
462 oldstate == PATH_GHOST)
463 update_queue_mode_del_path(mpp);
467 * schedule the next check earlier
469 if (pp->tick > checkint)
/* Re-adopt paths, rebuild the DM table params and reload the map,
 * retrying domap() a few times. For a new map, a final failure (or
 * failure to start event watching) removes the map.
 * NOTE(review): the log string "map_udate sleep" contains a typo
 * ("udate" -> "update") — present upstream as well; left unchanged here.
 * (Fragmented extract; code left byte-identical.) */
478 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
481 char params[PARAMS_SIZE] = {0};
484 condlog(4, "%s: updating new map", mpp->alias);
485 if (adopt_paths(vecs->pathvec, mpp)) {
486 condlog(0, "%s: failed to adopt paths for new map update",
491 verify_paths(mpp, vecs);
492 mpp->action = ACT_RELOAD;
494 extract_hwe_from_path(mpp);
495 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
496 condlog(0, "%s: failed to setup new map in update", mpp->alias);
500 if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
501 condlog(0, "%s: map_udate sleep", mpp->alias);
508 if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
509 condlog(0, "%s: failed to create new map", mpp->alias);
510 remove_map(mpp, vecs, 1);
514 if (setup_multipath(vecs, mpp))
520 condlog(0, "%s: failed reload in new map update", mpp->alias);
/* Register an existing kernel DM map (known only by alias) with
 * multipathd: allocate the mpp, read DM info/uuid/status, look up its
 * mptable entry, store it in mpvec, and run update_map(). On any failure
 * the map is purged and (per the visible error path) NULL is effectively
 * the failure result. (Fragmented extract; code left byte-identical.) */
524 static struct multipath *
525 add_map_without_path (struct vectors *vecs, const char *alias)
527 struct multipath * mpp = alloc_multipath();
537 mpp->alias = STRDUP(alias);
539 if (dm_get_info(mpp->alias, &mpp->dmi)) {
540 condlog(3, "%s: cannot access table", mpp->alias);
543 set_multipath_wwid(mpp);
544 conf = get_multipath_config();
545 mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
546 put_multipath_config(conf);
548 if (update_multipath_table(mpp, vecs->pathvec, 1))
550 if (update_multipath_status(mpp))
553 if (!vector_alloc_slot(vecs->mpvec))
556 vector_set_slot(vecs->mpvec, mpp);
558 if (update_map(mpp, vecs, 1) != 0) /* map removed */
563 remove_map(mpp, vecs, PURGE_VEC);
/* Reconcile the old map vector with a freshly discovered one (nmpv):
 * maps no longer allowed by configuration are flushed (a flush failure —
 * e.g. device open — keeps the map, moving it into nmpv); surviving maps
 * may get their DM devices reassigned if reassign_maps is configured.
 * (Fragmented extract; code left byte-identical.) */
568 coalesce_maps(struct vectors *vecs, vector nmpv)
570 struct multipath * ompp;
571 vector ompv = vecs->mpvec;
572 unsigned int i, reassign_maps;
575 conf = get_multipath_config();
576 reassign_maps = conf->reassign_maps;
577 put_multipath_config(conf);
578 vector_foreach_slot (ompv, ompp, i) {
579 condlog(3, "%s: coalesce map", ompp->alias);
580 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
582 * remove all current maps not allowed by the
583 * current configuration
585 if (dm_flush_map(ompp->alias)) {
586 condlog(0, "%s: unable to flush devmap",
589 * may be just because the device is open
591 if (setup_multipath(vecs, ompp) != 0) {
595 if (!vector_alloc_slot(nmpv))
598 vector_set_slot(nmpv, ompp);
600 vector_del_slot(ompv, i);
605 condlog(2, "%s devmap removed", ompp->alias);
607 } else if (reassign_maps) {
608 condlog(3, "%s: Reassign existing device-mapper"
609 " devices", ompp->alias);
610 dm_reassign(ompp->alias);
/* Iterate all maps and sync each one's state (the per-map call is not
 * visible in this extract — presumably sync_map_state(); confirm). */
617 sync_maps_state(vector mpvec)
620 struct multipath *mpp;
622 vector_foreach_slot (mpvec, mpp, i)
/* Flush a map from device-mapper — with dm_flush_map_nopaths() when
 * `nopaths` is set (honoring deferred remove) — then orphan its paths and
 * drop the mpp. A failed flush may merely mean deferred removal is now in
 * progress. (Fragmented extract: return statements and branch structure
 * are missing; code left byte-identical.) */
627 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
632 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
634 r = dm_flush_map(mpp->alias);
636 * clear references to this map before flushing so we can ignore
637 * the spurious uevent we may generate with the dm_flush_map call below
641 * May not really be an error -- if the map was already flushed
642 * from the device mapper by dmsetup(8) for instance.
645 condlog(0, "%s: can't flush", mpp->alias);
647 condlog(2, "%s: devmap deferred remove", mpp->alias);
648 mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
654 condlog(2, "%s: map flushed", mpp->alias);
657 orphan_paths(vecs->pathvec, mpp);
658 remove_map_and_stop_waiter(mpp, vecs);
/* uevent handler for map addition: resolve the map alias from DM_NAME
 * or, failing that, from major:minor via dm_mapname(), then call
 * ev_add_map() under vecs->lock with cancellation-safe cleanup.
 * (Fragmented extract; code left byte-identical.) */
664 uev_add_map (struct uevent * uev, struct vectors * vecs)
667 int major = -1, minor = -1, rc;
669 condlog(3, "%s: add map (uevent)", uev->kernel);
670 alias = uevent_get_dm_name(uev);
672 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
673 major = uevent_get_major(uev);
674 minor = uevent_get_minor(uev);
675 alias = dm_mapname(major, minor);
677 condlog(2, "%s: mapname not found for %d:%d",
678 uev->kernel, major, minor);
682 pthread_cleanup_push(cleanup_lock, &vecs->lock);
684 pthread_testcancel();
685 rc = ev_add_map(uev->kernel, alias, vecs);
686 lock_cleanup_pop(vecs->lock);
692 * ev_add_map expects that the multipath device already exists in kernel
693 * before it is called. It just adds a device to multipathd or updates an
/* If the map is already known: run delayed actions queued while waiting
 * for udev (update_map, optional delayed reconfigure, optional DM device
 * reassign). Otherwise register it via add_map_without_path().
 * (Fragmented extract: returns and several branches missing; code left
 * byte-identical.) */
697 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
699 struct multipath * mpp;
700 int delayed_reconfig, reassign_maps;
703 if (!dm_is_mpath(alias)) {
704 condlog(4, "%s: not a multipath map", alias);
708 mpp = find_mp_by_alias(vecs->mpvec, alias);
711 if (mpp->wait_for_udev > 1) {
712 condlog(2, "%s: performing delayed actions",
714 if (update_map(mpp, vecs, 0))
715 /* setup multipathd removed the map */
718 conf = get_multipath_config();
719 delayed_reconfig = conf->delayed_reconfig;
720 reassign_maps = conf->reassign_maps;
721 put_multipath_config(conf);
722 if (mpp->wait_for_udev) {
723 mpp->wait_for_udev = 0;
724 if (delayed_reconfig &&
725 !need_to_delay_reconfig(vecs)) {
726 condlog(2, "reconfigure (delayed)");
727 set_config_state(DAEMON_CONFIGURE);
732 * Not really an error -- we generate our own uevent
733 * if we create a multipath mapped device as a result
737 condlog(3, "%s: Reassign existing device-mapper devices",
743 condlog(2, "%s: adding map", alias);
746 * now we can register the map
748 if ((mpp = add_map_without_path(vecs, alias))) {
750 condlog(2, "%s: devmap %s registered", alias, dev);
753 condlog(2, "%s: ev_add_map failed", dev);
/* uevent handler for map removal: look the map up by DM minor under
 * vecs->lock, verify the alias matches the uevent's DM_NAME (guards
 * against minor-number reuse), then orphan paths and drop the map.
 * (Fragmented extract; code left byte-identical.) */
759 uev_remove_map (struct uevent * uev, struct vectors * vecs)
763 struct multipath *mpp;
765 condlog(3, "%s: remove map (uevent)", uev->kernel);
766 alias = uevent_get_dm_name(uev);
768 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
771 minor = uevent_get_minor(uev);
773 pthread_cleanup_push(cleanup_lock, &vecs->lock);
775 pthread_testcancel();
776 mpp = find_mp_by_minor(vecs->mpvec, minor);
779 condlog(2, "%s: devmap not registered, can't remove",
783 if (strcmp(mpp->alias, alias)) {
784 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\")",
785 uev->kernel, mpp->alias, alias);
789 orphan_paths(vecs->pathvec, mpp);
790 remove_map_and_stop_waiter(mpp, vecs);
792 lock_cleanup_pop(vecs->lock);
797 /* Called from CLI handler */
/* Remove a map by alias+minor on operator request; the minor must match
 * the registered map to avoid flushing the wrong device. Caller is
 * expected to hold vecs->lock (CLI handler path). Delegates to
 * flush_map() with nopaths=0. (Fragmented extract.) */
799 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
801 struct multipath * mpp;
803 mpp = find_mp_by_minor(vecs->mpvec, minor);
806 condlog(2, "%s: devmap not registered, can't remove",
810 if (strcmp(mpp->alias, alias)) {
811 condlog(2, "%s: minor number mismatch (map %d, event %d)",
812 mpp->alias, mpp->dmi->minor, minor);
815 return flush_map(mpp, vecs, 0);
/* uevent handler for path addition. Rejects relative device names
 * (".." traversal guard). If the path already exists in pathvec it may be
 * a spurious event, a reinitialization (no mpp, empty wwid) — rerun
 * pathinfo and either re-add or drop if now blacklisted — otherwise a new
 * path is allocated with full pathinfo and stored. All pathvec access is
 * under vecs->lock. (Fragmented extract; code left byte-identical.) */
819 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
825 condlog(3, "%s: add path (uevent)", uev->kernel);
826 if (strstr(uev->kernel, "..") != NULL) {
828 * Don't allow relative device names in the pathvec
830 condlog(0, "%s: path name is invalid", uev->kernel);
834 pthread_cleanup_push(cleanup_lock, &vecs->lock);
836 pthread_testcancel();
837 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
841 condlog(3, "%s: spurious uevent, path already in pathvec",
843 if (!pp->mpp && !strlen(pp->wwid)) {
844 condlog(3, "%s: reinitialize path", uev->kernel);
845 udev_device_unref(pp->udev);
846 pp->udev = udev_device_ref(uev->udev);
847 conf = get_multipath_config();
848 pthread_cleanup_push(put_multipath_config, conf);
849 r = pathinfo(pp, conf,
850 DI_ALL | DI_BLACKLIST);
851 pthread_cleanup_pop(1);
852 if (r == PATHINFO_OK)
853 ret = ev_add_path(pp, vecs, need_do_map);
854 else if (r == PATHINFO_SKIPPED) {
855 condlog(3, "%s: remove blacklisted path",
857 i = find_slot(vecs->pathvec, (void *)pp);
859 vector_del_slot(vecs->pathvec, i);
862 condlog(0, "%s: failed to reinitialize path",
868 lock_cleanup_pop(vecs->lock);
873 * get path vital state
875 conf = get_multipath_config();
876 pthread_cleanup_push(put_multipath_config, conf);
877 ret = alloc_path_with_pathinfo(conf, uev->udev,
878 uev->wwid, DI_ALL, &pp);
879 pthread_cleanup_pop(1);
881 if (ret == PATHINFO_SKIPPED)
883 condlog(3, "%s: failed to get path info", uev->kernel);
886 pthread_cleanup_push(cleanup_lock, &vecs->lock);
888 pthread_testcancel();
889 ret = store_path(vecs->pathvec, pp);
891 conf = get_multipath_config();
892 pp->checkint = conf->checkint;
893 put_multipath_config(conf);
894 ret = ev_add_path(pp, vecs, need_do_map);
896 condlog(0, "%s: failed to store path info, "
902 lock_cleanup_pop(vecs->lock);
/* Core path-addition logic (called with vecs->lock held by callers).
 * Requires a valid wwid. If the owning map is still waiting for udev and
 * already has usable paths, the addition is deferred (wait_for_udev=2).
 * A size mismatch with the map rejects the path. Otherwise the path is
 * adopted into an existing map (ACT_RELOAD) or a new map is created
 * (ACT_CREATE, including the persistent-reservation hook). The table is
 * rebuilt via setup_map() and pushed with domap(), retrying asynchronous
 * reload failures; event watching is started for newly created maps and
 * daemon state is resynced with setup_multipath()/sync_map_state().
 * On the fail paths the map and/or path are removed/orphaned.
 * (Fragmented extract: returns, some branch structure and declarations
 * are missing; code left byte-identical.) */
912 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
914 struct multipath * mpp;
915 char params[PARAMS_SIZE] = {0};
917 int start_waiter = 0;
921 * need path UID to go any further
923 if (strlen(pp->wwid) == 0) {
924 condlog(0, "%s: failed to get path uid", pp->dev);
925 goto fail; /* leave path added to pathvec */
927 mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
928 if (mpp && mpp->wait_for_udev &&
929 (pathcount(mpp, PATH_UP) > 0 ||
930 (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
931 mpp->ghost_delay_tick <= 0))) {
932 /* if wait_for_udev is set and valid paths exist */
933 condlog(3, "%s: delaying path addition until %s is fully initialized",
934 pp->dev, mpp->alias);
935 mpp->wait_for_udev = 2;
936 orphan_path(pp, "waiting for create to complete");
943 if (pp->size && mpp->size != pp->size) {
944 condlog(0, "%s: failed to add new path %s, "
945 "device size mismatch",
946 mpp->alias, pp->dev);
947 int i = find_slot(vecs->pathvec, (void *)pp);
949 vector_del_slot(vecs->pathvec, i);
954 condlog(4,"%s: adopting all paths for path %s",
955 mpp->alias, pp->dev);
956 if (adopt_paths(vecs->pathvec, mpp))
957 goto fail; /* leave path added to pathvec */
959 verify_paths(mpp, vecs);
960 mpp->action = ACT_RELOAD;
961 extract_hwe_from_path(mpp);
963 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
964 orphan_path(pp, "only one path");
967 condlog(4,"%s: creating new map", pp->dev);
968 if ((mpp = add_map_with_path(vecs, pp, 1))) {
969 mpp->action = ACT_CREATE;
971 * We don't depend on ACT_CREATE, as domap will
972 * set it to ACT_NOTHING when complete.
977 goto fail; /* leave path added to pathvec */
980 /* persistent reservation check*/
981 mpath_pr_event_handle(pp);
986 if (!dm_map_present(mpp->alias)) {
987 mpp->action = ACT_CREATE;
991 * push the map to the device-mapper
993 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
994 condlog(0, "%s: failed to setup map for addition of new "
995 "path %s", mpp->alias, pp->dev);
999 * reload the map for the multipath mapped device
1002 ret = domap(mpp, params, 1);
1004 if (ret < 0 && retries-- > 0) {
1005 condlog(0, "%s: retry domap for addition of new "
1006 "path %s", mpp->alias, pp->dev);
1010 condlog(0, "%s: failed in domap for addition of new "
1011 "path %s", mpp->alias, pp->dev);
1013 * deal with asynchronous uevents :((
1015 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1016 condlog(0, "%s: ev_add_path sleep", mpp->alias);
1018 update_mpp_paths(mpp, vecs->pathvec);
1021 else if (mpp->action == ACT_RELOAD)
1022 condlog(0, "%s: giving up reload", mpp->alias);
1028 if ((mpp->action == ACT_CREATE ||
1029 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1030 wait_for_events(mpp, vecs))
1034 * update our state from kernel regardless of create or reload
1036 if (setup_multipath(vecs, mpp))
1037 goto fail; /* if setup_multipath fails, it removes the map */
1039 sync_map_state(mpp);
1042 condlog(2, "%s [%s]: path added to devmap %s",
1043 pp->dev, pp->dev_t, mpp->alias);
1049 remove_map(mpp, vecs, 1);
1051 orphan_path(pp, "failed to add path");
/* uevent handler for path removal: drop any foreign-library record for
 * the device, then (under vecs->lock) find the path and delegate to
 * ev_remove_path(). A missing path is not an error — it may already have
 * been purged. (Fragmented extract; code left byte-identical.) */
1056 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1061 condlog(3, "%s: remove path (uevent)", uev->kernel);
1062 delete_foreign(uev->udev);
1064 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1066 pthread_testcancel();
1067 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1069 ret = ev_remove_path(pp, vecs, need_do_map);
1070 lock_cleanup_pop(vecs->lock);
1072 /* Not an error; path might have been purged earlier */
1073 condlog(0, "%s: path already removed", uev->kernel);
/* Core path-removal logic. If the path belongs to a map: refresh
 * mpp->paths, clear mpp->hwe if it aliases the dying path's hwe, delete
 * the path slot; if that leaves the map empty, optionally disable
 * queueing (flush_on_last_del) and flush the whole map, otherwise rebuild
 * and reload the table (deferred when the map is still waiting on udev).
 * Finally the path is removed from pathvec. On reload failure the map is
 * removed. (Fragmented extract: returns and several branches missing;
 * code left byte-identical.) */
1080 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1082 struct multipath * mpp;
1084 char params[PARAMS_SIZE] = {0};
1087 * avoid referring to the map of an orphaned path
1089 if ((mpp = pp->mpp)) {
1091 * transform the mp->pg vector of vectors of paths
1092 * into a mp->params string to feed the device-mapper
1094 if (update_mpp_paths(mpp, vecs->pathvec)) {
1095 condlog(0, "%s: failed to update paths",
1101 * Make sure mpp->hwe doesn't point to freed memory
1102 * We call extract_hwe_from_path() below to restore mpp->hwe
1104 if (mpp->hwe == pp->hwe)
1107 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1108 vector_del_slot(mpp->paths, i);
1111 * remove the map IF removing the last path
1113 if (VECTOR_SIZE(mpp->paths) == 0) {
1114 char alias[WWID_SIZE];
1117 * flush_map will fail if the device is open
1119 strlcpy(alias, mpp->alias, WWID_SIZE);
1120 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1121 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1122 mpp->retry_tick = 0;
1123 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1124 mpp->disable_queueing = 1;
1125 mpp->stat_map_failures++;
1126 dm_queue_if_no_path(mpp->alias, 0);
1128 if (!flush_map(mpp, vecs, 1)) {
1129 condlog(2, "%s: removed map after"
1130 " removing all paths",
1136 * Not an error, continue
1140 if (mpp->hwe == NULL)
1141 extract_hwe_from_path(mpp);
1143 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1144 condlog(0, "%s: failed to setup map for"
1145 " removal of path %s", mpp->alias, pp->dev);
1149 if (mpp->wait_for_udev) {
1150 mpp->wait_for_udev = 2;
1159 mpp->action = ACT_RELOAD;
1160 if (domap(mpp, params, 1) <= 0) {
1161 condlog(0, "%s: failed in domap for "
1162 "removal of path %s",
1163 mpp->alias, pp->dev);
1167 * update our state from kernel
1169 if (setup_multipath(vecs, mpp))
1171 sync_map_state(mpp);
1173 condlog(2, "%s [%s]: path removed from map %s",
1174 pp->dev, pp->dev_t, mpp->alias);
1179 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1180 vector_del_slot(vecs->pathvec, i);
1187 remove_map_and_stop_waiter(mpp, vecs);
/* uevent handler for path "change" events. First dispatches foreign
 * paths; then, under vecs->lock: re-reads the wwid (failing the path in
 * DM if it changed and disable_changed_wwids is set), refreshes udev
 * reference and sysfs pathinfo, and handles read-only flips by reloading
 * the map with force_readonly. Unknown paths are either blacklisted
 * (ignored) or treated as a late "add" via uev_add_path().
 * NOTE(review): log string "change_forein" contains a typo
 * ("forein" -> "foreign") — present upstream; left unchanged here.
 * (Fragmented extract; code left byte-identical.) */
1192 uev_update_path (struct uevent *uev, struct vectors * vecs)
1194 int ro, retval = 0, rc;
1196 struct config *conf;
1197 int disable_changed_wwids;
1198 int needs_reinit = 0;
1200 switch ((rc = change_foreign(uev->udev))) {
1202 /* known foreign path, ignore event */
1204 case FOREIGN_IGNORED:
1207 condlog(3, "%s: error in change_foreign", __func__);
1210 condlog(1, "%s: return code %d of change_forein is unsupported",
1215 conf = get_multipath_config();
1216 disable_changed_wwids = conf->disable_changed_wwids;
1217 put_multipath_config(conf);
1219 ro = uevent_get_disk_ro(uev);
1221 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1223 pthread_testcancel();
1225 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1227 struct multipath *mpp = pp->mpp;
1228 char wwid[WWID_SIZE];
1230 if (pp->initialized == INIT_REQUESTED_UDEV) {
1234 /* Don't deal with other types of failed initialization
1235 * now. check_path will handle it */
1236 if (!strlen(pp->wwid))
1239 strcpy(wwid, pp->wwid);
1240 get_uid(pp, pp->state, uev->udev);
1242 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1243 condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1244 uev->kernel, wwid, pp->wwid,
1245 (disable_changed_wwids ? "disallowing" :
1247 strcpy(pp->wwid, wwid);
1248 if (disable_changed_wwids) {
1249 if (!pp->wwid_changed) {
1250 pp->wwid_changed = 1;
1253 dm_fail_path(pp->mpp->alias, pp->dev_t);
1258 pp->wwid_changed = 0;
1259 udev_device_unref(pp->udev);
1260 pp->udev = udev_device_ref(uev->udev);
1261 conf = get_multipath_config();
1262 pthread_cleanup_push(put_multipath_config, conf);
1263 if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1264 condlog(1, "%s: pathinfo failed after change uevent",
1266 pthread_cleanup_pop(1);
1269 if (mpp && ro >= 0) {
1270 condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1272 if (mpp->wait_for_udev)
1273 mpp->wait_for_udev = 2;
1276 pp->mpp->force_readonly = 1;
1277 retval = reload_map(vecs, mpp, 0, 1);
1278 pp->mpp->force_readonly = 0;
1279 condlog(2, "%s: map %s reloaded (retval %d)",
1280 uev->kernel, mpp->alias, retval);
1285 lock_cleanup_pop(vecs->lock);
1287 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1289 int flag = DI_SYSFS | DI_WWID;
1291 conf = get_multipath_config();
1292 pthread_cleanup_push(put_multipath_config, conf);
1293 retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1294 pthread_cleanup_pop(1);
1296 if (retval == PATHINFO_SKIPPED) {
1297 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1302 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1305 retval = uev_add_path(uev, vecs, 1);
/* Inspect a DM uevent for DM_ACTION=PATH_FAILED; if present, look up the
 * failed path by DM_PATH (dev_t) under vecs->lock and feed it to the
 * io_err_stat marginal-path accounting. (Fragmented extract; code left
 * byte-identical.) */
1310 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1312 char *action = NULL, *devt = NULL;
1316 action = uevent_get_dm_action(uev);
1319 if (strncmp(action, "PATH_FAILED", 11))
1321 devt = uevent_get_dm_path(uev);
1323 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1327 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1329 pthread_testcancel();
1330 pp = find_path_by_devt(vecs->pathvec, devt);
1333 r = io_err_stat_handle_pathfail(pp);
1335 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1338 lock_cleanup_pop(vecs->lock);
/* Discover all existing DM maps via dm_get_maps(), then refresh each
 * map's table and status, removing any map that fails to sync.
 * (Fragmented extract; code left byte-identical.) */
1348 map_discovery (struct vectors * vecs)
1350 struct multipath * mpp;
1353 if (dm_get_maps(vecs->mpvec))
1356 vector_foreach_slot (vecs->mpvec, mpp, i)
1357 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1358 update_multipath_status(mpp)) {
1359 remove_map(mpp, vecs, 1);
/* Unix-socket command dispatcher: non-root clients may only run
 * "list"/"show" commands (others get a permission-denied reply); commands
 * are routed to parse_cmd() and the reply/len are filled with "timeout",
 * "fail" or "ok" as appropriate. The reply strings are allocated with
 * STRDUP; ownership passes to the caller. (Fragmented extract; code left
 * byte-identical.) */
1367 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1368 void * trigger_data)
1370 struct vectors * vecs;
1375 vecs = (struct vectors *)trigger_data;
1377 if ((str != NULL) && (is_root == false) &&
1378 (strncmp(str, "list", strlen("list")) != 0) &&
1379 (strncmp(str, "show", strlen("show")) != 0)) {
1380 *reply = STRDUP("permission deny: need to be root");
1382 *len = strlen(*reply) + 1;
1386 r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1390 *reply = STRDUP("timeout\n");
1392 *reply = STRDUP("fail\n");
1394 *len = strlen(*reply) + 1;
1397 else if (!r && *len == 0) {
1398 *reply = STRDUP("ok\n");
1400 *len = strlen(*reply) + 1;
1403 /* else if (r < 0) leave *reply alone */
/* Top-level uevent dispatcher. Waits (on config_cond) until the daemon is
 * IDLE or RUNNING, bails out on shutdown, then routes: "dm-*" devices go
 * to map handlers (change -> uev_add_map + PATH_FAILED accounting,
 * remove -> uev_remove_map; non-multipath DM devices go to the foreign
 * library); everything else is a path event, including any add/remove
 * events merged into this uevent's merge_node list. (Fragmented extract;
 * code left byte-identical.) */
1409 uev_trigger (struct uevent * uev, void * trigger_data)
1412 struct vectors * vecs;
1413 struct uevent *merge_uev, *tmp;
1415 vecs = (struct vectors *)trigger_data;
1417 pthread_cleanup_push(config_cleanup, NULL);
1418 pthread_mutex_lock(&config_lock);
1419 if (running_state != DAEMON_IDLE &&
1420 running_state != DAEMON_RUNNING)
1421 pthread_cond_wait(&config_cond, &config_lock);
1422 pthread_cleanup_pop(1);
1424 if (running_state == DAEMON_SHUTDOWN)
1429 * Add events are ignored here as the tables
1430 * are not fully initialised then.
1432 if (!strncmp(uev->kernel, "dm-", 3)) {
1433 if (!uevent_is_mpath(uev)) {
1434 if (!strncmp(uev->action, "change", 6))
1435 (void)add_foreign(uev->udev);
1436 else if (!strncmp(uev->action, "remove", 6))
1437 (void)delete_foreign(uev->udev);
1440 if (!strncmp(uev->action, "change", 6)) {
1441 r = uev_add_map(uev, vecs);
1444 * the kernel-side dm-mpath issues a PATH_FAILED event
1445 * when it encounters a path IO error. It is reason-
1446 * able be the entry of path IO error accounting pro-
1449 uev_pathfail_check(uev, vecs);
1450 } else if (!strncmp(uev->action, "remove", 6)) {
1451 r = uev_remove_map(uev, vecs);
1457 * path add/remove/change event, add/remove maybe merged
1459 list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1460 if (!strncmp(merge_uev->action, "add", 3))
1461 r += uev_add_path(merge_uev, vecs, 0);
1462 if (!strncmp(merge_uev->action, "remove", 6))
1463 r += uev_remove_path(merge_uev, vecs, 0);
1466 if (!strncmp(uev->action, "add", 3))
1467 r += uev_add_path(uev, vecs, 1);
1468 if (!strncmp(uev->action, "remove", 6))
1469 r += uev_remove_path(uev, vecs, 1);
1470 if (!strncmp(uev->action, "change", 6))
1471 r += uev_update_path(uev, vecs);
/* pthread cleanup handler: unregister the exiting thread from RCU.
 * `param` is unused. */
1477 static void rcu_unregister(void *param)
1479 rcu_unregister_thread();
/* Thread entry point: register with RCU and run the blocking udev uevent
 * listener; the cleanup handler unregisters from RCU on exit or
 * cancellation. `ap` is the struct udev context. */
1483 ueventloop (void * ap)
1485 struct udev *udev = ap;
1487 pthread_cleanup_push(rcu_unregister, NULL);
1488 rcu_register_thread();
1489 if (uevent_listen(udev))
1490 condlog(0, "error starting uevent listener");
1491 pthread_cleanup_pop(1);
/* Thread entry point: register with RCU and dispatch queued uevents to
 * uev_trigger(); `ap` (the vectors) is passed through as trigger_data. */
1496 uevqloop (void * ap)
1498 pthread_cleanup_push(rcu_unregister, NULL);
1499 rcu_register_thread();
1500 if (uevent_dispatch(&uev_trigger, ap))
1501 condlog(0, "error starting uevent dispatcher");
1502 pthread_cleanup_pop(1);
/* Thread entry point for the CLI unix-socket listener: create the
 * socket, signal the main thread by posting DAEMON_CONFIGURE, register
 * every CLI command handler (locked handlers take vecs->lock; the
 * set_unlocked_handler_callback() ones do not), then serve requests via
 * uxsock_listen() with uxsock_trigger as dispatcher. Cleanup handlers
 * close the socket and unregister from RCU. (Fragmented extract; code
 * left byte-identical.) */
1506 uxlsnrloop (void * ap)
1510 pthread_cleanup_push(rcu_unregister, NULL);
1511 rcu_register_thread();
1513 ux_sock = ux_socket_listen(DEFAULT_SOCKET);
1514 if (ux_sock == -1) {
1515 condlog(1, "could not create uxsock: %d", errno);
1519 pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);
1522 condlog(1, "Failed to init uxsock listener");
1527 /* Tell main thread that thread has started */
1528 post_config_state(DAEMON_CONFIGURE);
1530 set_handler_callback(LIST+PATHS, cli_list_paths);
1531 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1532 set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1533 set_handler_callback(LIST+PATH, cli_list_path);
1534 set_handler_callback(LIST+MAPS, cli_list_maps);
1535 set_handler_callback(LIST+STATUS, cli_list_status);
1536 set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1537 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1538 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1539 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1540 set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1541 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1542 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1543 set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1544 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1545 set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1546 set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1547 set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1548 set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1549 set_handler_callback(LIST+CONFIG, cli_list_config);
1550 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1551 set_handler_callback(LIST+DEVICES, cli_list_devices);
1552 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1553 set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1554 set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1555 set_handler_callback(ADD+PATH, cli_add_path);
1556 set_handler_callback(DEL+PATH, cli_del_path);
1557 set_handler_callback(ADD+MAP, cli_add_map);
1558 set_handler_callback(DEL+MAP, cli_del_map);
1559 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1560 set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1561 set_handler_callback(SUSPEND+MAP, cli_suspend);
1562 set_handler_callback(RESUME+MAP, cli_resume);
1563 set_handler_callback(RESIZE+MAP, cli_resize);
1564 set_handler_callback(RELOAD+MAP, cli_reload);
1565 set_handler_callback(RESET+MAP, cli_reassign);
1566 set_handler_callback(REINSTATE+PATH, cli_reinstate);
1567 set_handler_callback(FAIL+PATH, cli_fail);
1568 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1569 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1570 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1571 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1572 set_unlocked_handler_callback(QUIT, cli_quit);
1573 set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1574 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1575 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1576 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1577 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1578 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1579 set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1580 set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1581 set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1584 uxsock_listen(&uxsock_trigger, ux_sock, ap);
1587 pthread_cleanup_pop(1); /* uxsock_cleanup */
1589 pthread_cleanup_pop(1); /* rcu_unregister */
1596 post_config_state(DAEMON_SHUTDOWN);
/* Mark a path failed in the kernel via dm_fail_path(); when del_active is
 * set (per the parameter name — the guarding condition is not visible in
 * this extract), also update the map's queueing-mode accounting. */
1600 fail_path (struct path * pp, int del_active)
1605 condlog(2, "checker failed path %s in map %s",
1606 pp->dev_t, pp->mpp->alias);
1608 dm_fail_path(pp->mpp->alias, pp->dev_t);
1610 update_queue_mode_del_path(pp->mpp);
1614 * caller must have locked the path list before calling that function
/* Reinstate a failed path in the kernel; on success, when add_active is
 * set (condition not visible in this extract), update the map's
 * queueing-mode accounting for the returning path. */
1617 reinstate_path (struct path * pp, int add_active)
1624 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1625 condlog(0, "%s: reinstate failed", pp->dev_t);
1628 condlog(2, "%s: reinstated", pp->dev_t);
1630 update_queue_mode_add_path(pp->mpp);
/* Re-enable the path group containing pp if DM has it disabled. Safe to
 * skip when pgindex is unset (path added via uev_add_path before the next
 * map reload sets it), since a reload re-enables all groups anyway. */
1636 enable_group(struct path * pp)
1638 struct pathgroup * pgp;
1641 * if path is added through uev_add_path, pgindex can be unset.
1642 * next update_strings() will set it, upon map reload event.
1644 * we can safely return here, because upon map reload, all
1645 * PG will be enabled.
1647 if (!pp->mpp->pg || !pp->pgindex)
1650 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1652 if (pgp->status == PGSTATE_DISABLED) {
1653 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1654 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/*
 * mpvec_garbage_collector(): drop multipath entries whose DM map no
 * longer exists in the kernel, stopping their event waiters as well.
 */
1659 mpvec_garbage_collector (struct vectors * vecs)
1661 struct multipath * mpp;
1667 vector_foreach_slot (vecs->mpvec, mpp, i) {
1668 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1669 condlog(2, "%s: remove dead map", mpp->alias);
1670 remove_map_and_stop_waiter(mpp, vecs);
1676 /* This is called after a path has started working again. If the multipath
1677 * device for this path uses the followover failback type, and this is the
1678 * best pathgroup, and this is the first path in the pathgroup to come back
1679 * up, then switch to this pathgroup */
1681 followover_should_failback(struct path * pp)
1683 struct pathgroup * pgp;
/* only applies to followover failback, and only for the best group */
1687 if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1688 !pp->mpp->pg || !pp->pgindex ||
1689 pp->pgindex != pp->mpp->bestpg)
/*
 * scan siblings: any path already up (per its checker state) means
 * pp is not the first one back in this group
 */
1692 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1693 vector_foreach_slot(pgp->paths, pp1, i) {
1696 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * missing_uev_wait_tick(): count down the per-map timers that wait for
 * a creation uevent. On timeout, re-enable reloads for the map (and
 * possibly reload it via update_map()). If any timer fired and the
 * delayed_reconfig option is set, trigger the postponed reconfigure.
 */
1703 missing_uev_wait_tick(struct vectors *vecs)
1705 struct multipath * mpp;
1707 int timed_out = 0, delayed_reconfig;
1708 struct config *conf;
1710 vector_foreach_slot (vecs->mpvec, mpp, i) {
1711 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1713 condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1714 if (mpp->wait_for_udev > 1 &&
1715 update_map(mpp, vecs, 0)) {
1716 /* update_map removed map */
1720 mpp->wait_for_udev = 0;
/* run the reconfigure that was postponed while waiting for udev */
1724 conf = get_multipath_config();
1725 delayed_reconfig = conf->delayed_reconfig;
1726 put_multipath_config(conf);
1727 if (timed_out && delayed_reconfig &&
1728 !need_to_delay_reconfig(vecs)) {
1729 condlog(2, "reconfigure (delayed)");
1730 set_config_state(DAEMON_CONFIGURE);
/*
 * ghost_delay_tick(): count down per-map timers waiting for an active
 * (non-ghost) path. When a timer expires, force a udev reload of the
 * map via update_map().
 */
1735 ghost_delay_tick(struct vectors *vecs)
1737 struct multipath * mpp;
1740 vector_foreach_slot (vecs->mpvec, mpp, i) {
/* <= 0: either disabled or already expired; skip */
1741 if (mpp->ghost_delay_tick <= 0)
1743 if (--mpp->ghost_delay_tick <= 0) {
1744 condlog(0, "%s: timed out waiting for active path",
1746 mpp->force_udev_reload = 1;
1747 if (update_map(mpp, vecs, 0) != 0) {
1748 /* update_map removed map */
/*
 * defered_failback_tick(): count down deferred-failback timers on maps
 * with a positive pgfailback interval; when a timer hits zero and a
 * better path group exists, switch to it.
 */
1757 defered_failback_tick (vector mpvec)
1759 struct multipath * mpp;
1762 vector_foreach_slot (mpvec, mpp, i) {
1764 * deferred failback getting sooner
1766 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1767 mpp->failback_tick--;
1769 if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1770 switch_pathgroup(mpp);
/*
 * retry_count_tick(): while a map is queueing I/O with no active path,
 * account the queueing time; once the retry budget (retry_tick) is
 * exhausted, record a map failure and disable queue_if_no_path.
 */
1776 retry_count_tick(vector mpvec)
1778 struct multipath *mpp;
1781 vector_foreach_slot (mpvec, mpp, i) {
1782 if (mpp->retry_tick > 0) {
1783 mpp->stat_total_queueing_time++;
1784 condlog(4, "%s: Retrying.. No active path", mpp->alias);
1785 if(--mpp->retry_tick == 0) {
1786 mpp->stat_map_failures++;
1787 dm_queue_if_no_path(mpp->alias, 0);
1788 condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * update_prio(): refresh path priorities via pathinfo(DI_PRIO).
 * With refresh_all set, every path in every path group of pp's map is
 * re-prioritized; otherwise only pp itself. Returns nonzero when a
 * priority changed (per the `changed` flag; elided lines not shown).
 */
1794 int update_prio(struct path *pp, int refresh_all)
1798 struct pathgroup * pgp;
1799 int i, j, changed = 0;
1800 struct config *conf;
1803 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1804 vector_foreach_slot (pgp->paths, pp1, j) {
1805 oldpriority = pp1->priority;
1806 conf = get_multipath_config();
/* cleanup handler releases the config ref even on cancellation */
1807 pthread_cleanup_push(put_multipath_config,
1809 pathinfo(pp1, conf, DI_PRIO);
1810 pthread_cleanup_pop(1);
1811 if (pp1->priority != oldpriority)
1817 oldpriority = pp->priority;
1818 conf = get_multipath_config();
1819 pthread_cleanup_push(put_multipath_config, conf);
/* don't query the priority of a path known to be down */
1820 if (pp->state != PATH_DOWN)
1821 pathinfo(pp, conf, DI_PRIO);
1822 pthread_cleanup_pop(1);
1824 if (pp->priority == oldpriority)
/*
 * update_path_groups(): reload the map's DM table (regrouping paths,
 * optionally refreshing path info first), then rebuild internal state
 * with setup_multipath() and resync path states with the kernel map.
 */
1829 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1831 if (reload_map(vecs, mpp, refresh, 1))
1835 if (setup_multipath(vecs, mpp) != 0)
1837 sync_map_state(mpp);
/*
 * check_path(): run one checker iteration for a single path:
 * determine its new state (offline check + checker), synchronize with
 * the kernel map, fail/reinstate the path in DM as needed, apply the
 * failback policy, and adapt the per-path check interval.
 * NOTE(review): many interior lines are elided in this listing, so
 * some control-flow details (returns, closing braces) are not visible.
 */
1843 * Returns '1' if the path has been checked, '-1' if it was blacklisted
1847 check_path (struct vectors * vecs, struct path * pp, int ticks)
1850 int new_path_up = 0;
1851 int chkr_new_path_up = 0;
1853 int disable_reinstate = 0;
1854 int oldchkrstate = pp->chkrstate;
1855 int retrigger_tries, checkint, max_checkint, verbosity;
1856 struct config *conf;
/* orphaned-but-initialized paths are not checked */
1859 if ((pp->initialized == INIT_OK ||
1860 pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
/* consume elapsed ticks; only check when the countdown reaches 0 */
1864 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1866 return 0; /* don't check this path yet */
/* take a consistent snapshot of the tunables used below */
1868 conf = get_multipath_config();
1869 retrigger_tries = conf->retrigger_tries;
1870 checkint = conf->checkint;
1871 max_checkint = conf->max_checkint;
1872 verbosity = conf->verbosity;
1873 put_multipath_config(conf);
1875 if (pp->checkint == CHECKINT_UNDEF) {
1876 condlog(0, "%s: BUG: checkint is not set", pp->dev);
1877 pp->checkint = checkint;
/*
 * path with no udev info yet: re-trigger a "change" uevent a limited
 * number of times, then give up and mark it INIT_FAILED
 */
1880 if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1881 if (pp->retriggers < retrigger_tries) {
1882 condlog(2, "%s: triggering change event to reinitialize",
1884 pp->initialized = INIT_REQUESTED_UDEV;
1886 sysfs_attr_set_value(pp->udev, "uevent", "change",
1890 condlog(1, "%s: not initialized after %d udev retriggers",
1891 pp->dev, retrigger_tries);
1893 * Make sure that the "add missing path" code path
1894 * below may reinstate the path later, if it ever
1896 * The WWID needs not be cleared; if it was set, the
1897 * state hadn't been INIT_MISSING_UDEV in the first
1900 pp->initialized = INIT_FAILED;
1906 * provision a next check soonest,
1907 * in case we exit abnormaly from here
1909 pp->tick = checkint;
1911 newstate = path_offline(pp);
1913 * Wait for uevent for removed paths;
1914 * some LLDDs like zfcp keep paths unavailable
1915 * without sending uevents.
1917 if (newstate == PATH_REMOVED)
1918 newstate = PATH_DOWN;
/* only run the (possibly expensive) checker if sysfs says "running" */
1920 if (newstate == PATH_UP) {
1921 conf = get_multipath_config();
1922 pthread_cleanup_push(put_multipath_config, conf);
1923 newstate = get_state(pp, conf, 1, newstate);
1924 pthread_cleanup_pop(1);
1926 checker_clear_message(&pp->checker);
/* a path whose WWID changed must never be used */
1928 if (pp->wwid_changed) {
1929 condlog(2, "%s: path wwid has changed. Refusing to use",
1931 newstate = PATH_DOWN;
1934 if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1935 condlog(2, "%s: unusable path - checker failed", pp->dev);
1936 LOG_MSG(2, verbosity, pp);
1937 conf = get_multipath_config();
1938 pthread_cleanup_push(put_multipath_config, conf);
1939 pathinfo(pp, conf, 0);
1940 pthread_cleanup_pop(1);
/*
 * a previously failed-to-initialize path came up: retry full
 * pathinfo and add it to a map if it now initializes cleanly
 */
1944 if (!strlen(pp->wwid) && pp->initialized == INIT_FAILED &&
1945 (newstate == PATH_UP || newstate == PATH_GHOST)) {
1946 condlog(2, "%s: add missing path", pp->dev);
1947 conf = get_multipath_config();
1948 pthread_cleanup_push(put_multipath_config, conf);
1949 ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1950 pthread_cleanup_pop(1);
1951 /* INIT_OK implies ret == PATHINFO_OK */
1952 if (pp->initialized == INIT_OK) {
1953 ev_add_path(pp, vecs, 1);
1957 * We failed multiple times to initialize this
1958 * path properly. Don't re-check too often.
1960 pp->checkint = max_checkint;
1961 if (ret == PATHINFO_SKIPPED)
1968 * Async IO in flight. Keep the previous path state
1969 * and reschedule as soon as possible
1971 if (newstate == PATH_PENDING) {
1976 * Synchronize with kernel state
1978 if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1979 condlog(1, "%s: Could not synchronize with kernel state",
1981 pp->dmstate = PSTATE_UNDEF;
1983 /* if update_multipath_strings orphaned the path, quit early */
/* io_err recheck window: report the path shaky, recheck soon */
1987 if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1988 pp->state = PATH_SHAKY;
1990 * to reschedule as soon as possible,so that this path can
1991 * be recoverd in time
/* delay_wait_checks: hold a recovered path back while others serve I/O */
1997 if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1998 pp->wait_checks > 0) {
1999 if (pp->mpp->nr_active > 0) {
2000 pp->state = PATH_DELAYED;
2004 pp->wait_checks = 0;
2008 * don't reinstate failed path, if its in stand-by
2009 * and if target supports only implicit tpgs mode.
2010 * this will prevent unnecessary i/o by dm on stand-by
2011 * paths if there are no other active paths in map.
2013 disable_reinstate = (newstate == PATH_GHOST &&
2014 pp->mpp->nr_active == 0 &&
2015 pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
2017 pp->chkrstate = newstate;
/* ---- state transition handling ---- */
2018 if (newstate != pp->state) {
2019 int oldstate = pp->state;
2020 pp->state = newstate;
2022 LOG_MSG(1, verbosity, pp);
2025 * upon state change, reset the checkint
2026 * to the shortest delay
2028 conf = get_multipath_config();
2029 pp->checkint = conf->checkint;
2030 put_multipath_config(conf);
2032 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2034 * proactively fail path in the DM
2036 if (oldstate == PATH_UP ||
2037 oldstate == PATH_GHOST) {
2039 if (pp->mpp->delay_wait_checks > 0 &&
2040 pp->watch_checks > 0) {
2041 pp->wait_checks = pp->mpp->delay_wait_checks;
2042 pp->watch_checks = 0;
2046 if (pp->wait_checks > 0)
2048 pp->mpp->delay_wait_checks;
2052 * cancel scheduled failback
2054 pp->mpp->failback_tick = 0;
2056 pp->mpp->stat_path_failures++;
/* path came (back) up */
2060 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2061 if (pp->mpp->prflag) {
2063 * Check Persistent Reservation.
2065 condlog(2, "%s: checking persistent "
2066 "reservation registration", pp->dev);
2067 mpath_pr_event_handle(pp);
2072 * reinstate this path
2074 if (oldstate != PATH_UP &&
2075 oldstate != PATH_GHOST) {
2076 if (pp->mpp->delay_watch_checks > 0)
2077 pp->watch_checks = pp->mpp->delay_watch_checks;
2080 if (pp->watch_checks > 0)
2084 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2085 condlog(3, "%s: reload map", pp->dev);
2086 ev_add_path(pp, vecs, 1);
2092 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2093 chkr_new_path_up = 1;
2096 * if at least one path is up in a group, and
2097 * the group is disabled, re-enable it
2099 if (newstate == PATH_UP)
/* ---- no state change: path stayed up ---- */
2102 else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2103 if ((pp->dmstate == PSTATE_FAILED ||
2104 pp->dmstate == PSTATE_UNDEF) &&
2105 !disable_reinstate) {
2106 /* Clear IO errors */
2107 if (reinstate_path(pp, 0)) {
2108 condlog(3, "%s: reload map", pp->dev);
2109 ev_add_path(pp, vecs, 1);
2114 LOG_MSG(4, verbosity, pp);
/* stable path: back off the check interval exponentially */
2115 if (pp->checkint != max_checkint) {
2117 * double the next check delay.
2118 * max at conf->max_checkint
2120 if (pp->checkint < (max_checkint / 2))
2121 pp->checkint = 2 * pp->checkint;
2123 pp->checkint = max_checkint;
2125 condlog(4, "%s: delay next check %is",
2126 pp->dev_t, pp->checkint);
2128 if (pp->watch_checks > 0)
2130 pp->tick = pp->checkint;
/* ---- no state change: path stayed down ---- */
2133 else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2134 if (pp->dmstate == PSTATE_ACTIVE ||
2135 pp->dmstate == PSTATE_UNDEF)
2137 if (newstate == PATH_DOWN) {
2138 int log_checker_err;
2140 conf = get_multipath_config();
2141 log_checker_err = conf->log_checker_err;
2142 put_multipath_config(conf);
2143 if (log_checker_err == LOG_CHKR_ERR_ONCE)
2144 LOG_MSG(3, verbosity, pp);
2146 LOG_MSG(2, verbosity, pp);
2150 pp->state = newstate;
/* while waiting for udev on the map, skip failback processing */
2152 if (pp->mpp->wait_for_udev)
2155 * path prio refreshing
2157 condlog(4, "path prio refresh");
/*
 * priorities changed under group_by_prio + immediate failback:
 * regroup now; otherwise apply the configured failback policy
 */
2159 if (update_prio(pp, new_path_up) &&
2160 (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2161 pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2162 update_path_groups(pp->mpp, vecs, !new_path_up);
2163 else if (need_switch_pathgroup(pp->mpp, 0)) {
2164 if (pp->mpp->pgfailback > 0 &&
2165 (new_path_up || pp->mpp->failback_tick <= 0))
2166 pp->mpp->failback_tick =
2167 pp->mpp->pgfailback + 1;
2168 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2169 (chkr_new_path_up && followover_should_failback(pp)))
2170 switch_pathgroup(pp->mpp);
/*
 * checkerloop(): the path-checker thread. Each iteration: pet the
 * systemd watchdog, wait for DAEMON_RUNNING, check every path under
 * the vecs lock, run the per-map tick handlers, garbage-collect dead
 * maps, then sleep — padding the sleep to a full second when
 * strict_timing is enabled.
 */
2176 checkerloop (void *ap)
2178 struct vectors *vecs;
2182 struct timespec last_time;
2183 struct config *conf;
2185 pthread_cleanup_push(rcu_unregister, NULL);
2186 rcu_register_thread();
2187 mlockall(MCL_CURRENT | MCL_FUTURE);
2188 vecs = (struct vectors *)ap;
2189 condlog(2, "path checkers start up");
2191 /* Tweak start time for initial path check */
2192 if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2193 last_time.tv_sec = 0;
2195 last_time.tv_sec -= 1;
2198 struct timespec diff_time, start_time, end_time;
2199 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
/* ticks = whole seconds elapsed since the previous iteration */
2201 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2202 start_time.tv_sec = 0;
2203 if (start_time.tv_sec && last_time.tv_sec) {
2204 timespecsub(&start_time, &last_time, &diff_time);
2205 condlog(4, "tick (%lu.%06lu secs)",
2206 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2207 last_time = start_time;
2208 ticks = diff_time.tv_sec;
2211 condlog(4, "tick (%d ticks)", ticks);
/* keep the systemd watchdog happy while we are alive */
2215 sd_notify(0, "WATCHDOG=1");
2217 rc = set_config_state(DAEMON_RUNNING);
2218 if (rc == ETIMEDOUT) {
2219 condlog(4, "timeout waiting for DAEMON_IDLE");
2221 } else if (rc == EINVAL)
2222 /* daemon shutdown */
/* pass 1: per-path checks, under the vecs lock */
2225 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2227 pthread_testcancel();
2228 vector_foreach_slot (vecs->pathvec, pp, i) {
2229 rc = check_path(vecs, pp, ticks);
2231 vector_del_slot(vecs->pathvec, i);
2237 lock_cleanup_pop(vecs->lock);
/* pass 2: per-map countdown handlers */
2239 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2241 pthread_testcancel();
2242 defered_failback_tick(vecs->mpvec);
2243 retry_count_tick(vecs->mpvec);
2244 missing_uev_wait_tick(vecs);
2245 ghost_delay_tick(vecs);
2246 lock_cleanup_pop(vecs->lock);
/* pass 3: drop maps that vanished from device-mapper */
2251 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2253 pthread_testcancel();
2254 condlog(4, "map garbage collection");
2255 mpvec_garbage_collector(vecs);
2257 lock_cleanup_pop(vecs->lock);
/* measure how long the checks took and warn if too slow */
2260 diff_time.tv_nsec = 0;
2261 if (start_time.tv_sec &&
2262 clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2263 timespecsub(&end_time, &start_time, &diff_time);
2265 unsigned int max_checkint;
2267 condlog(3, "checked %d path%s in %lu.%06lu secs",
2268 num_paths, num_paths > 1 ? "s" : "",
2270 diff_time.tv_nsec / 1000);
2271 conf = get_multipath_config();
2272 max_checkint = conf->max_checkint;
2273 put_multipath_config(conf);
2274 if (diff_time.tv_sec > max_checkint)
2275 condlog(1, "path checkers took longer "
2276 "than %lu seconds, consider "
2277 "increasing max_polling_interval",
2282 post_config_state(DAEMON_IDLE);
2283 conf = get_multipath_config();
2284 strict_timing = conf->strict_timing;
2285 put_multipath_config(conf);
/* strict timing: sleep the remainder of the 1s slot exactly */
2289 if (diff_time.tv_nsec) {
2290 diff_time.tv_sec = 0;
2292 1000UL * 1000 * 1000 - diff_time.tv_nsec;
2294 diff_time.tv_sec = 1;
2296 condlog(3, "waiting for %lu.%06lu secs",
2298 diff_time.tv_nsec / 1000);
2299 if (nanosleep(&diff_time, NULL) != 0) {
2300 condlog(3, "nanosleep failed with error %d",
/* nanosleep failed: fall back to normal timing */
2302 conf = get_multipath_config();
2303 conf->strict_timing = 0;
2304 put_multipath_config(conf);
2309 pthread_cleanup_pop(1);
/*
 * configure(): (re)build the daemon's view of the world: discover
 * paths from sysfs and maps from device-mapper, filter blacklisted
 * paths, coalesce paths into maps (pushing changed tables into DM),
 * drop obsolete maps, and start event waiters for the new map set.
 * force_reload is FORCE_RELOAD_WEAK only on the first call (see the
 * comment in the body).
 */
2314 configure (struct vectors * vecs)
2316 struct multipath * mpp;
2320 struct config *conf;
2321 static int force_reload = FORCE_RELOAD_WEAK;
2323 if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2324 condlog(0, "couldn't allocate path vec in configure");
2328 if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2329 condlog(0, "couldn't allocate multipath vec in configure");
2333 if (!(mpvec = vector_alloc())) {
2334 condlog(0, "couldn't allocate new maps vec in configure");
2339 * probe for current path (from sysfs) and map (from dm) sets
2341 ret = path_discovery(vecs->pathvec, DI_ALL);
2343 condlog(0, "configure failed at path discovery");
/* drop paths the current config blacklists */
2347 conf = get_multipath_config();
2348 pthread_cleanup_push(put_multipath_config, conf);
2349 vector_foreach_slot (vecs->pathvec, pp, i){
2350 if (filter_path(conf, pp) > 0){
2351 vector_del_slot(vecs->pathvec, i);
2356 pthread_cleanup_pop(1);
2358 if (map_discovery(vecs)) {
2359 condlog(0, "configure failed at map discovery");
2364 * create new set of maps & push changed ones into dm
2365 * In the first call, use FORCE_RELOAD_WEAK to avoid making
2366 * superfluous ACT_RELOAD ioctls. Later calls are done
2367 * with FORCE_RELOAD_YES.
2369 ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2370 if (force_reload == FORCE_RELOAD_WEAK)
2371 force_reload = FORCE_RELOAD_YES;
2373 condlog(0, "configure failed while coalescing paths");
2378 * may need to remove some maps which are no longer relevant
2379 * e.g., due to blacklist changes in conf file
2381 if (coalesce_maps(vecs, mpvec)) {
2382 condlog(0, "configure failed while coalescing maps");
2388 sync_maps_state(mpvec);
2389 vector_foreach_slot(mpvec, mpp, i){
2390 if (remember_wwid(mpp->wwid) == 1)
2391 trigger_paths_udev_change(mpp, true);
2396 * purge dm of old maps
2401 * save new set of maps formed by considering current path state
2403 vector_free(vecs->mpvec);
2404 vecs->mpvec = mpvec;
2407 * start dm event waiter threads for these new maps
2409 vector_foreach_slot(vecs->mpvec, mpp, i) {
2410 if (wait_for_events(mpp, vecs)) {
2411 remove_map(mpp, vecs, 1);
2415 if (setup_multipath(vecs, mpp))
/*
 * need_to_delay_reconfig(): a reconfigure must be postponed while any
 * map is still waiting for its udev creation event. Empty map vector
 * means nothing to wait for.
 */
2426 need_to_delay_reconfig(struct vectors * vecs)
2428 struct multipath *mpp;
2431 if (!VECTOR_SIZE(vecs->mpvec))
2434 vector_foreach_slot(vecs->mpvec, mpp, i) {
2435 if (mpp->wait_for_udev)
/*
 * rcu_free_config(): RCU callback that frees a struct config once all
 * readers have left the grace period (scheduled via call_rcu()).
 */
2441 void rcu_free_config(struct rcu_head *head)
2443 struct config *conf = container_of(head, struct config, rcu);
/*
 * reconfigure(): reload multipath.conf and rebuild all state: load the
 * new config, tear down existing maps/waiters and path vectors,
 * reapply command-line overrides (verbosity, bindings_read_only,
 * uxsock_timeout), then publish the new config via RCU and free the
 * old one after a grace period.
 */
2449 reconfigure (struct vectors * vecs)
2451 struct config * old, *conf;
2453 conf = load_config(DEFAULT_CONFIGFILE);
2458 * free old map and path vectors ... they use old conf state
2460 if (VECTOR_SIZE(vecs->mpvec))
2461 remove_maps_and_stop_waiters(vecs);
2463 free_pathvec(vecs->pathvec, FREE_PATHS);
2464 vecs->pathvec = NULL;
2465 delete_all_foreign();
2467 /* Re-read any timezone changes */
2470 dm_tgt_version(conf->version, TGT_MPATH);
/* command-line options override the freshly loaded config */
2472 conf->verbosity = verbosity;
2473 if (bindings_read_only)
2474 conf->bindings_read_only = bindings_read_only;
2475 uxsock_timeout = conf->uxsock_timeout;
/* swap in the new config; old one is freed after the RCU grace period */
2477 old = rcu_dereference(multipath_conf);
2478 rcu_assign_pointer(multipath_conf, conf);
2479 call_rcu(&old->rcu, rcu_free_config);
/*
 * init_vecs(): allocate the global vectors container and initialize
 * its lock. (Function name line elided in this listing; the caller at
 * startup uses `vecs = gvecs = init_vecs();`.)
 */
2487 static struct vectors *
2490 struct vectors * vecs;
2492 vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2497 pthread_mutex_init(&vecs->lock.mutex, NULL);
/*
 * signal_set(): install a signal handler with sigaction() and return
 * the previously installed handler (signal()-style wrapper).
 */
2503 signal_set(int signo, void (*func) (int))
2506 struct sigaction sig;
2507 struct sigaction osig;
2509 sig.sa_handler = func;
2510 sigemptyset(&sig.sa_mask);
2513 r = sigaction(signo, &sig, &osig);
2518 return (osig.sa_handler);
/*
 * handle_signals(): act on flags raised by the async signal handlers:
 * exit on the exit flag, switch to DAEMON_CONFIGURE on the reconfigure
 * flag, and reset logging on the log-reset flag.
 */
2522 handle_signals(bool nonfatal)
2525 condlog(2, "exit (signal)");
2532 condlog(2, "reconfigure (signal)");
2533 set_config_state(DAEMON_CONFIGURE);
2535 if (log_reset_sig) {
2536 condlog(2, "reset log (signal)");
/* (from the sigusr2 handler, elided in this listing) */
2565 condlog(3, "SIGUSR2 received");
/*
 * signal_init(): block all signals in this thread (except SIGPIPE,
 * which logging may trigger) and install the daemon's handlers.
 * Signals are later unblocked in the uxlsnr thread (see comment).
 */
2573 /* block all signals */
2575 /* SIGPIPE occurs if logging fails */
2576 sigdelset(&set, SIGPIPE);
2577 pthread_sigmask(SIG_SETMASK, &set, NULL);
2579 /* Other signals will be unblocked in the uxlsnr thread */
2580 signal_set(SIGHUP, sighup);
2581 signal_set(SIGUSR1, sigusr1);
2582 signal_set(SIGUSR2, sigusr2);
2583 signal_set(SIGINT, sigend);
2584 signal_set(SIGTERM, sigend);
2585 signal_set(SIGPIPE, sigend);
/*
 * setscheduler(): request real-time round-robin scheduling at the
 * highest priority (99) so the checker is not starved; failure is
 * logged but non-fatal.
 */
2592 static struct sched_param sched_param = {
2593 .sched_priority = 99
2596 res = sched_setscheduler (0, SCHED_RR, &sched_param);
2599 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * set_oom_adj(): make the daemon immune to the OOM killer by writing
 * the minimum score to /proc/self/oom_score_adj (falling back to the
 * legacy /proc/self/oom_adj when the modern file does not exist).
 * Skipped entirely when systemd already set OOMScoreAdjust.
 */
2606 #ifdef OOM_SCORE_ADJ_MIN
2608 char *file = "/proc/self/oom_score_adj";
2609 int score = OOM_SCORE_ADJ_MIN;
2612 char *file = "/proc/self/oom_adj";
2613 int score = OOM_ADJUST_MIN;
/* systemd already handled it for us */
2619 envp = getenv("OOMScoreAdjust");
2621 condlog(3, "Using systemd provided OOMScoreAdjust");
2625 if (stat(file, &st) == 0){
2626 fp = fopen(file, "w");
2628 condlog(0, "couldn't fopen %s : %s", file,
2632 fprintf(fp, "%i", score);
2636 if (errno != ENOENT) {
2637 condlog(0, "couldn't stat %s : %s", file,
/* fall back to the legacy interface on older kernels */
2641 #ifdef OOM_ADJUST_MIN
2642 file = "/proc/self/oom_adj";
2643 score = OOM_ADJUST_MIN;
2648 condlog(0, "couldn't adjust oom score");
/*
 * child(): the daemon's main body (runs in the forked child, or
 * directly in foreground mode). Sets up logging, pidfile, config,
 * checkers/prioritizers, then spawns the worker threads (uxlsnr,
 * dmevents waiter, uevent listener/dispatcher, checker loop), services
 * DAEMON_CONFIGURE requests until shutdown, and finally tears all of
 * it down in reverse order.
 * NOTE(review): interior lines are elided in this listing; error-exit
 * paths between the visible lines are not shown.
 */
2652 child (void * param)
2654 pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2655 pthread_attr_t log_attr, misc_attr, uevent_attr;
2656 struct vectors * vecs;
2657 struct multipath * mpp;
2660 unsigned long checkint;
2661 int startup_done = 0;
2665 struct config *conf;
2667 int queue_without_daemon;
2669 mlockall(MCL_CURRENT | MCL_FUTURE);
2673 setup_thread_attr(&misc_attr, 64 * 1024, 0);
2674 setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2675 setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2676 setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2679 setup_thread_attr(&log_attr, 64 * 1024, 0);
2680 log_thread_start(&log_attr);
2681 pthread_attr_destroy(&log_attr);
2683 pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2685 condlog(1, "failed to create pidfile");
2691 post_config_state(DAEMON_START);
2693 condlog(2, "--------start up--------");
2694 condlog(2, "read " DEFAULT_CONFIGFILE);
2696 conf = load_config(DEFAULT_CONFIGFILE);
/* command-line options override the config file */
2701 conf->verbosity = verbosity;
2702 if (bindings_read_only)
2703 conf->bindings_read_only = bindings_read_only;
2704 uxsock_timeout = conf->uxsock_timeout;
2705 rcu_assign_pointer(multipath_conf, conf);
2706 if (init_checkers(conf->multipath_dir)) {
2707 condlog(0, "failed to initialize checkers");
2710 if (init_prio(conf->multipath_dir)) {
2711 condlog(0, "failed to initialize prioritizers");
2714 /* Failing this is non-fatal */
2716 init_foreign(conf->multipath_dir);
2719 poll_dmevents = dmevent_poll_supported();
2720 setlogmask(LOG_UPTO(conf->verbosity + 3));
2722 envp = getenv("LimitNOFILE");
2725 condlog(2,"Using systemd provided open fds limit of %s", envp);
2727 set_max_fds(conf->max_fds);
2729 vecs = gvecs = init_vecs();
/*
 * systemd watchdog: derive checkint/max_checkint from WATCHDOG_USEC
 * so path checks always complete within the watchdog window
 */
2737 envp = getenv("WATCHDOG_USEC");
2738 if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2739 /* Value is in microseconds */
2740 conf->max_checkint = checkint / 1000000;
2741 /* Rescale checkint */
2742 if (conf->checkint > conf->max_checkint)
2743 conf->checkint = conf->max_checkint;
2745 conf->checkint = conf->max_checkint / 4;
2746 condlog(3, "enabling watchdog, interval %d max %d",
2747 conf->checkint, conf->max_checkint);
2748 use_watchdog = conf->checkint;
2752 * Startup done, invalidate configuration
/* start the cli listener and wait (under config_lock) for it to run */
2756 pthread_cleanup_push(config_cleanup, NULL);
2757 pthread_mutex_lock(&config_lock);
2759 __post_config_state(DAEMON_IDLE);
2760 rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
2762 /* Wait for uxlsnr startup */
2763 while (running_state == DAEMON_IDLE)
2764 pthread_cond_wait(&config_cond, &config_lock);
2766 pthread_cleanup_pop(1);
2769 condlog(0, "failed to create cli listener: %d", rc);
2772 else if (running_state != DAEMON_CONFIGURE) {
2773 condlog(0, "cli listener failed to start");
2777 if (poll_dmevents) {
2778 if (init_dmevent_waiter(vecs)) {
2779 condlog(0, "failed to allocate dmevents waiter info");
2782 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2783 wait_dmevents, NULL))) {
2784 condlog(0, "failed to create dmevent waiter thread: %d",
2791 * Start uevent listener early to catch events
2793 if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2794 condlog(0, "failed to create uevent thread: %d", rc);
2797 pthread_attr_destroy(&uevent_attr);
2802 if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2803 condlog(0,"failed to create checker loop thread: %d", rc);
2806 if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2807 condlog(0, "failed to create uevent dispatcher: %d", rc);
2810 pthread_attr_destroy(&misc_attr);
/* main service loop: wait for CONFIGURE requests until SHUTDOWN */
2812 while (running_state != DAEMON_SHUTDOWN) {
2813 pthread_cleanup_push(config_cleanup, NULL);
2814 pthread_mutex_lock(&config_lock);
2815 if (running_state != DAEMON_CONFIGURE &&
2816 running_state != DAEMON_SHUTDOWN) {
2817 pthread_cond_wait(&config_cond, &config_lock);
2819 pthread_cleanup_pop(1);
2820 if (running_state == DAEMON_CONFIGURE) {
2821 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2823 pthread_testcancel();
2824 if (!need_to_delay_reconfig(vecs)) {
/* a map is still waiting for udev: postpone the reconfigure */
2827 conf = get_multipath_config();
2828 conf->delayed_reconfig = 1;
2829 put_multipath_config(conf);
2831 lock_cleanup_pop(vecs->lock);
2832 post_config_state(DAEMON_IDLE);
2834 if (!startup_done) {
2835 sd_notify(0, "READY=1");
/* ---- shutdown: stop queueing (if configured), kill threads ---- */
2843 conf = get_multipath_config();
2844 queue_without_daemon = conf->queue_without_daemon;
2845 put_multipath_config(conf);
2846 if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2847 vector_foreach_slot(vecs->mpvec, mpp, i)
2848 dm_queue_if_no_path(mpp->alias, 0);
2849 remove_maps_and_stop_waiters(vecs);
2850 unlock(&vecs->lock);
2852 pthread_cancel(check_thr);
2853 pthread_cancel(uevent_thr);
2854 pthread_cancel(uxlsnr_thr);
2855 pthread_cancel(uevq_thr);
2857 pthread_cancel(dmevent_thr);
2859 pthread_join(check_thr, NULL);
2860 pthread_join(uevent_thr, NULL);
2861 pthread_join(uxlsnr_thr, NULL);
2862 pthread_join(uevq_thr, NULL);
2864 pthread_join(dmevent_thr, NULL);
2866 stop_io_err_stat_thread();
2869 free_pathvec(vecs->pathvec, FREE_PATHS);
2870 vecs->pathvec = NULL;
2871 unlock(&vecs->lock);
2873 pthread_mutex_destroy(&vecs->lock.mutex);
2881 cleanup_dmevent_waiter();
2886 /* We're done here */
2887 condlog(3, "unlink pidfile");
2888 unlink(DEFAULT_PIDFILE);
2890 condlog(2, "--------shut down-------");
2896 * Freeing config must be done after condlog() and dm_lib_exit(),
2897 * because logging functions like dlog() and dm_write_log()
2898 * reference the config.
2900 conf = rcu_dereference(multipath_conf);
2901 rcu_assign_pointer(multipath_conf, NULL);
2902 call_rcu(&conf->rcu, rcu_free_config);
2905 pthread_attr_destroy(&waiter_attr);
2906 pthread_attr_destroy(&io_err_stat_attr);
2908 dbg_free_final(NULL);
2912 sd_notify(0, "ERRNO=0");
2918 sd_notify(0, "ERRNO=1");
/*
 * daemonize(): classic double-fork daemonization — detach from the
 * controlling terminal, chdir to /, and redirect stdin/stdout/stderr
 * to /dev/null. Records the daemon's pid on success.
 * NOTE(review): the function's opening lines are elided in this
 * listing; only the body fragments below are visible.
 */
2931 if( (pid = fork()) < 0){
2932 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2940 if ( (pid = fork()) < 0)
2941 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2946 fprintf(stderr, "cannot chdir to '/', continuing\n");
2948 dev_null_fd = open("/dev/null", O_RDWR);
2949 if (dev_null_fd < 0){
2950 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
/* redirect the three standard streams to /dev/null */
2955 close(STDIN_FILENO);
2956 if (dup(dev_null_fd) < 0) {
2957 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2961 close(STDOUT_FILENO);
2962 if (dup(dev_null_fd) < 0) {
2963 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2967 close(STDERR_FILENO);
2968 if (dup(dev_null_fd) < 0) {
2969 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2974 daemon_pid = getpid();
/*
 * main(): entry point. Parses options; in client mode (-k or trailing
 * command words) forwards the command to the running daemon over the
 * unix socket and exits; otherwise performs root/umask/signal setup
 * and runs the daemon via child() (optionally daemonizing first).
 */
2979 main (int argc, char *argv[])
2981 extern char *optarg;
2986 struct config *conf;
/* valgrind/DRD: these are intentionally accessed without locks */
2988 ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2989 "Manipulated through RCU");
2990 ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2991 "Suppress complaints about unprotected running_state reads");
2992 ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2993 "Suppress complaints about this scalar variable");
2997 if (getuid() != 0) {
2998 fprintf(stderr, "need to be root\n");
3002 /* make sure we don't lock any path */
3004 fprintf(stderr, "can't chdir to root directory : %s\n",
3006 umask(umask(077) | 022);
3008 pthread_cond_init_mono(&config_cond);
3011 libmp_udev_set_sync_support(0);
3013 while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
3019 //debug=1; /* ### comment me out ### */
3022 if (sizeof(optarg) > sizeof(char *) ||
3023 !isdigit(optarg[0]))
3026 verbosity = atoi(optarg);
/* -k: interactive client mode — talk to the running daemon */
3033 conf = load_config(DEFAULT_CONFIGFILE);
3037 conf->verbosity = verbosity;
3038 uxsock_timeout = conf->uxsock_timeout;
3039 err = uxclnt(optarg, uxsock_timeout + 100);
3043 bindings_read_only = 1;
3046 condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3052 fprintf(stderr, "Invalid argument '-%c'\n",
/* trailing words: join them into one cli command and send it */
3057 if (optind < argc) {
3063 conf = load_config(DEFAULT_CONFIGFILE);
3067 conf->verbosity = verbosity;
3068 uxsock_timeout = conf->uxsock_timeout;
3069 memset(cmd, 0x0, CMDSIZE);
3070 while (optind < argc) {
3071 if (strchr(argv[optind], ' '))
3072 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3074 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3077 c += snprintf(c, s + CMDSIZE - c, "\n");
3078 err = uxclnt(s, uxsock_timeout + 100);
/* unbuffer stdout when not attached to a terminal (e.g. journald) */
3084 if (!isatty(fileno(stdout)))
3085 setbuf(stdout, NULL);
3087 daemon_pid = getpid();
3099 return (child(NULL));
/*
 * mpath_pr_event_handler_fn(): worker thread that verifies a path's
 * persistent-reservation registration. Reads the registered keys via
 * PR IN (READ KEYS); if the map's reservation_key is not among them,
 * re-registers it on this path via PR OUT (REGISTER AND IGNORE
 * EXISTING KEY).
 */
3102 void * mpath_pr_event_handler_fn (void * pathp )
3104 struct multipath * mpp;
3105 int i, ret, isFound;
3106 struct path * pp = (struct path *)pathp;
3107 struct prout_param_descriptor *param;
3108 struct prin_resp *resp;
3110 rcu_register_thread();
3113 resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3115 condlog(0,"%s Alloc failed for prin response", pp->dev);
3119 ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3120 if (ret != MPATH_PR_SUCCESS )
3122 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
3126 condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
3127 resp->prin_descriptor.prin_readkeys.additional_length );
3129 if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
3131 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3132 ret = MPATH_PR_SUCCESS;
3135 condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ",
3136 get_be64(mpp->reservation_key));
/* each registered key occupies 8 bytes in the READ KEYS payload */
3139 for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
3141 condlog(2, "PR IN READKEYS[%d] reservation key:",i);
3142 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
3143 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3145 condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3152 condlog(0, "%s: Either device not registered or ", pp->dev);
3153 condlog(0, "host is not authorised for registration. Skip path");
3154 ret = MPATH_PR_OTHER;
/* key missing on this path: re-register it (REGISTER AND IGNORE) */
3158 param= malloc(sizeof(struct prout_param_descriptor));
3159 memset(param, 0 , sizeof(struct prout_param_descriptor));
3160 param->sa_flags = mpp->sa_flags;
3161 memcpy(param->sa_key, &mpp->reservation_key, 8);
3162 param->num_transportid = 0;
3164 condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3166 ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3167 if (ret != MPATH_PR_SUCCESS )
3169 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
3177 rcu_unregister_thread();
3181 int mpath_pr_event_handle(struct path *pp)
3185 pthread_attr_t attr;
3186 struct multipath * mpp;
3188 if (pp->bus != SYSFS_BUS_SCSI)
3193 if (!get_be64(mpp->reservation_key))
3196 pthread_attr_init(&attr);
3197 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3199 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
3201 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3204 pthread_attr_destroy(&attr);
3205 rc = pthread_join(thread, NULL);