2 * Copyright (c) 2004, 2005 Christophe Varoqui
3 * Copyright (c) 2005 Kiyoshi Ueda, NEC
4 * Copyright (c) 2005 Benjamin Marzinski, Redhat
5 * Copyright (c) 2005 Edward Goggin, EMC
9 #include <libdevmapper.h>
12 #include <sys/types.h>
16 #include <sys/resource.h>
18 #include <linux/oom.h>
22 #include <systemd/sd-daemon.h>
24 #include <semaphore.h>
31 #include "time-util.h"
/* Nonzero when systemd watchdog keepalives should be sent -- presumably
 * set during startup from WATCHDOG_USEC; not visible in this chunk, TODO
 * confirm against the full source. */
39 static int use_watchdog;
53 #include "blacklist.h"
54 #include "structs_vec.h"
56 #include "devmapper.h"
59 #include "discovery.h"
63 #include "switchgroup.h"
65 #include "configure.h"
68 #include "pgpolicies.h"
72 #include "mpath_cmd.h"
73 #include "mpath_persist.h"
75 #include "prioritizers/alua_rtpg.h"
82 #include "cli_handlers.h"
86 #include "io_err_stat.h"
89 #include "../third-party/valgrind/drd.h"
/* Constants, macros and daemon-global state.
 * NOTE(review): this chunk is decimated -- many original lines are missing,
 * so the declarations below are fragments of a larger section. */
91 #define FILE_NAME_SIZE 256
/* Log a per-path message, with a distinct text for offline paths. */
94 #define LOG_MSG(a, b) \
97 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
99 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
/* Arguments handed to a map-event handler (see mpath_pr_event usage). */
102 struct mpath_event_param
105 struct multipath *mpp;
111 int bindings_read_only;
/* Selects dmevent polling vs. per-map waiter threads; compile-time
 * default depends on NO_DMEVENTS_POLL. */
113 #ifdef NO_DMEVENTS_POLL
114 int poll_dmevents = 0;
116 int poll_dmevents = 1;
/* Daemon state machine, guarded by config_lock and signalled on
 * config_cond (see post_config_state/set_config_state). */
118 enum daemon_status running_state = DAEMON_INIT;
120 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
121 pthread_cond_t config_cond;
124 * global copy of vecs for use in sig handlers
126 struct vectors * gvecs;
/* RCU-protected pointer to the live configuration; read through
 * get_multipath_config()/put_multipath_config(). */
130 struct config *multipath_conf;
132 /* Local variables */
/* Signal-handler flags: written from signal context, hence sig_atomic_t. */
133 static volatile sig_atomic_t exit_sig;
134 static volatile sig_atomic_t reconfig_sig;
135 static volatile sig_atomic_t log_reset_sig;
/* Fragment of a daemon-status helper; the enclosing function's signature
 * is missing from this chunk. It dispatches on running_state -- presumably
 * mapping each state to a short name string; TODO confirm in full source. */
140 switch (running_state) {
145 case DAEMON_CONFIGURE:
151 case DAEMON_SHUTDOWN:
158 * I love you too, systemd ...
/* Translate the daemon state machine into the systemd "STATUS=" string
 * passed to sd_notify() by do_sd_notify(). */
161 sd_notify_status(void)
163 switch (running_state) {
165 return "STATUS=init";
167 return "STATUS=startup";
168 case DAEMON_CONFIGURE:
169 return "STATUS=configure";
173 case DAEMON_SHUTDOWN:
174 return "STATUS=shutdown";
/* Report the current state to systemd, but suppress the very frequent
 * IDLE <-> RUNNING transitions to avoid hammering dbus. */
180 static void do_sd_notify(enum daemon_status old_state)
183 * Checkerloop switches back and forth between idle and running state.
184 * No need to tell systemd each time.
185 * These notifications cause a lot of overhead on dbus.
187 if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
188 (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
190 sd_notify(0, sd_notify_status())
/* pthread cleanup handler: releases config_lock if the holding thread
 * is cancelled while waiting on config_cond. */
194 static void config_cleanup(void *arg)
196 pthread_mutex_unlock(&config_lock);
/* Set running_state under config_lock, wake every waiter on config_cond,
 * and notify systemd of the transition. No-op when state is unchanged. */
199 void post_config_state(enum daemon_status state)
201 pthread_mutex_lock(&config_lock);
202 if (state != running_state) {
203 enum daemon_status old_state = running_state;
205 running_state = state;
206 pthread_cond_broadcast(&config_cond);
208 do_sd_notify(old_state);
211 pthread_mutex_unlock(&config_lock);
/* Like post_config_state(), but if the daemon is not IDLE, first wait on
 * config_cond with a CLOCK_MONOTONIC deadline for it to become so.
 * Cancellation-safe via the config_cleanup push/pop pair. Returns the
 * pthread_cond_timedwait result -- presumably 0 or ETIMEDOUT; the missing
 * lines make the exact contract unverifiable here. */
214 int set_config_state(enum daemon_status state)
218 pthread_cleanup_push(config_cleanup, NULL);
219 pthread_mutex_lock(&config_lock);
220 if (running_state != state) {
221 enum daemon_status old_state = running_state;
223 if (running_state != DAEMON_IDLE) {
226 clock_gettime(CLOCK_MONOTONIC, &ts);
228 rc = pthread_cond_timedwait(&config_cond,
232 running_state = state;
233 pthread_cond_broadcast(&config_cond);
235 do_sd_notify(old_state);
/* pop(1): executes config_cleanup, i.e. unlocks config_lock. */
239 pthread_cleanup_pop(1);
/* RCU read-side accessor for the live config pointer; pair every call
 * with put_multipath_config(). */
243 struct config *get_multipath_config(void)
246 return rcu_dereference(multipath_conf);
/* Release a config reference taken by get_multipath_config(). Takes a
 * void* so it doubles as a pthread_cleanup_push() handler (body not
 * visible in this chunk). */
249 void put_multipath_config(void *arg)
/* Decide whether mpp should switch pathgroups: optionally refresh each
 * path's priority (refresh != 0), recompute the best pathgroup, and
 * compare it with the scheduled nextpg. FAILBACK_MANUAL suppresses
 * automatic switching. */
255 need_switch_pathgroup (struct multipath * mpp, int refresh)
257 struct pathgroup * pgp;
267 * Refresh path priority values
270 vector_foreach_slot (mpp->pg, pgp, i) {
271 vector_foreach_slot (pgp->paths, pp, j) {
272 conf = get_multipath_config();
273 pthread_cleanup_push(put_multipath_config,
275 pathinfo(pp, conf, DI_PRIO);
276 pthread_cleanup_pop(1);
281 if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
284 bestpg = select_path_group(mpp);
285 if (mpp->pgfailback == -FAILBACK_MANUAL)
288 mpp->bestpg = bestpg;
289 if (mpp->bestpg != mpp->nextpg)
/* Ask device-mapper to switch mpp to its best pathgroup and count the
 * switch in the map's statistics. */
296 switch_pathgroup (struct multipath * mpp)
298 mpp->stat_switchgroup++;
299 dm_switchgroup(mpp->alias, mpp->bestpg);
300 condlog(2, "%s: switch to path group #%i",
301 mpp->alias, mpp->bestpg);
/* Start event monitoring for mpp: dmevent polling when enabled
 * (poll_dmevents), otherwise a dedicated waiter thread. */
305 wait_for_events(struct multipath *mpp, struct vectors *vecs)
308 return watch_dmevents(mpp->alias);
310 return start_waiter_thread(mpp, vecs);
/* Tear down a map: stop its waiter thread (unless the dmevent polling
 * code handles removal automatically) and purge it from the vectors. */
314 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
316 /* devices are automatically removed by the dmevent polling code,
317 * so they don't need to be manually removed here */
319 stop_waiter_thread(mpp, vecs);
320 remove_map(mpp, vecs, PURGE_VEC);
/* Bulk teardown (shutdown/reconfigure): stop every per-map waiter thread,
 * or unwatch all dmevents when polling is in use. */
324 remove_maps_and_stop_waiters(struct vectors *vecs)
327 struct multipath * mpp;
332 if (!poll_dmevents) {
333 vector_foreach_slot(vecs->mpvec, mpp, i)
334 stop_waiter_thread(mpp, vecs);
337 unwatch_all_dmevents();
/* Fill mpp->wwid from the device-mapper UUID, unless it is already set. */
343 set_multipath_wwid (struct multipath * mpp)
345 if (strlen(mpp->wwid))
348 dm_get_uuid(mpp->alias, mpp->wwid);
/* Reconcile the map's queue_if_no_path feature with its no_path_retry
 * policy and the current count of usable (UP/GHOST) paths; when queueing
 * with no active paths, arm recovery mode so queueing can time out. */
351 static void set_no_path_retry(struct multipath *mpp)
353 char is_queueing = 0;
355 mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
/* is_queueing mirrors whether the kernel table already queues I/O. */
356 if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
359 switch (mpp->no_path_retry) {
360 case NO_PATH_RETRY_UNDEF:
362 case NO_PATH_RETRY_FAIL:
364 dm_queue_if_no_path(mpp->alias, 0);
366 case NO_PATH_RETRY_QUEUE:
368 dm_queue_if_no_path(mpp->alias, 1);
371 if (mpp->nr_active > 0) {
373 dm_queue_if_no_path(mpp->alias, 1);
374 } else if (is_queueing && mpp->retry_tick == 0)
375 enter_recovery_mode(mpp);
/* Refresh mpp's state from the kernel: dm info, table/status strings,
 * queueing policy, and deferred-remove cancellation when paths exist.
 * On failure the map is removed and freed -- callers must not touch
 * mpp afterwards (see update_multipath()). */
380 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
383 if (dm_get_info(mpp->alias, &mpp->dmi)) {
384 /* Error accessing table */
385 condlog(3, "%s: cannot access table", mpp->alias);
389 if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
390 condlog(0, "%s: failed to setup multipath", mpp->alias);
395 set_no_path_retry(mpp);
396 if (VECTOR_SIZE(mpp->paths) != 0)
397 dm_cancel_deferred_remove(mpp);
402 remove_map_and_stop_waiter(mpp, vecs);
/* Sync checker state with DM state for the named map: any path the
 * kernel has marked failed but the checker still considers up is forced
 * to PATH_DOWN, accounted, and scheduled for an earlier re-check.
 * Returns nonzero when the map is unknown or was removed during setup. */
406 int update_multipath (struct vectors *vecs, char *mapname, int reset)
408 struct multipath *mpp;
409 struct pathgroup *pgp;
413 mpp = find_mp_by_alias(vecs->mpvec, mapname);
416 condlog(3, "%s: multipath map not found", mapname);
420 if (__setup_multipath(vecs, mpp, reset))
421 return 1; /* mpp freed in setup_multipath */
424 * compare checkers states with DM states
426 vector_foreach_slot (mpp->pg, pgp, i) {
427 vector_foreach_slot (pgp->paths, pp, j) {
428 if (pp->dmstate != PSTATE_FAILED)
431 if (pp->state != PATH_DOWN) {
432 struct config *conf = get_multipath_config();
433 int oldstate = pp->state;
436 conf = get_multipath_config();
437 checkint = conf->checkint;
438 put_multipath_config(conf);
439 condlog(2, "%s: mark as failed", pp->dev);
440 mpp->stat_path_failures++;
441 pp->state = PATH_DOWN;
442 if (oldstate == PATH_UP ||
443 oldstate == PATH_GHOST)
444 update_queue_mode_del_path(mpp);
448 * schedule the next check earlier
450 if (pp->tick > checkint)
/* Re-adopt paths and reload the DM table for mpp, retrying domap() a few
 * times. For a brand-new map, exhausting retries or failing to start
 * event monitoring removes the map entirely. */
459 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
462 char params[PARAMS_SIZE] = {0};
465 condlog(4, "%s: updating new map", mpp->alias);
466 if (adopt_paths(vecs->pathvec, mpp)) {
467 condlog(0, "%s: failed to adopt paths for new map update",
472 verify_paths(mpp, vecs);
473 mpp->action = ACT_RELOAD;
475 extract_hwe_from_path(mpp);
476 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
477 condlog(0, "%s: failed to setup new map in update", mpp->alias);
/* NOTE(review): "map_udate" below looks like a typo for "map_update" in
 * the log text; left untouched here since it is a runtime string. */
481 if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
482 condlog(0, "%s: map_udate sleep", mpp->alias);
489 if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
490 condlog(0, "%s: failed to create new map", mpp->alias);
491 remove_map(mpp, vecs, 1);
495 if (setup_multipath(vecs, mpp))
501 condlog(0, "%s: failed reload in new map update", mpp->alias);
/* Register a multipath map discovered without any known paths (e.g. from
 * a dm uevent): allocate it, read its dm info / wwid / config entry,
 * insert it into mpvec and run update_map(). On any failure the map is
 * purged -- presumably returning NULL; the error path's return statements
 * are not visible in this chunk. */
505 static struct multipath *
506 add_map_without_path (struct vectors *vecs, const char *alias)
508 struct multipath * mpp = alloc_multipath();
518 mpp->alias = STRDUP(alias);
520 if (dm_get_info(mpp->alias, &mpp->dmi)) {
521 condlog(3, "%s: cannot access table", mpp->alias);
524 set_multipath_wwid(mpp);
525 conf = get_multipath_config();
526 mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
527 put_multipath_config(conf);
529 if (update_multipath_table(mpp, vecs->pathvec, 1))
531 if (update_multipath_status(mpp))
534 if (!vector_alloc_slot(vecs->mpvec))
537 vector_set_slot(vecs->mpvec, mpp);
539 if (update_map(mpp, vecs, 1) != 0) /* map removed */
544 remove_map(mpp, vecs, PURGE_VEC);
/* Reconcile the daemon's current map vector with a freshly discovered
 * one (nmpv): flush maps no longer allowed by the configuration (keeping
 * them if the flush fails, e.g. device open), move survivors into nmpv,
 * and optionally dm_reassign() existing devices. */
549 coalesce_maps(struct vectors *vecs, vector nmpv)
551 struct multipath * ompp;
552 vector ompv = vecs->mpvec;
553 unsigned int i, reassign_maps;
556 conf = get_multipath_config();
557 reassign_maps = conf->reassign_maps;
558 put_multipath_config(conf);
559 vector_foreach_slot (ompv, ompp, i) {
560 condlog(3, "%s: coalesce map", ompp->alias);
561 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
563 * remove all current maps not allowed by the
564 * current configuration
566 if (dm_flush_map(ompp->alias)) {
567 condlog(0, "%s: unable to flush devmap",
570 * may be just because the device is open
572 if (setup_multipath(vecs, ompp) != 0) {
576 if (!vector_alloc_slot(nmpv))
579 vector_set_slot(nmpv, ompp);
581 vector_del_slot(ompv, i);
586 condlog(2, "%s devmap removed", ompp->alias);
588 } else if (reassign_maps) {
589 condlog(3, "%s: Reassign existing device-mapper"
590 " devices", ompp->alias);
591 dm_reassign(ompp->alias);
/* Run the per-map state sync over every map in mpvec (the per-map call
 * is on a line not visible in this chunk -- presumably sync_map_state). */
598 sync_maps_state(vector mpvec)
601 struct multipath *mpp;
603 vector_foreach_slot (mpvec, mpp, i)
/* Flush a map from device-mapper (honoring deferred remove when nopaths
 * is set), then orphan its paths and drop it from the daemon's state.
 * A failed flush may merely mean the device is open. */
608 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
613 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
615 r = dm_flush_map(mpp->alias);
617 * clear references to this map before flushing so we can ignore
618 * the spurious uevent we may generate with the dm_flush_map call below
622 * May not really be an error -- if the map was already flushed
623 * from the device mapper by dmsetup(8) for instance.
626 condlog(0, "%s: can't flush", mpp->alias);
628 condlog(2, "%s: devmap deferred remove", mpp->alias);
629 mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
635 condlog(2, "%s: map flushed", mpp->alias);
638 orphan_paths(vecs->pathvec, mpp);
639 remove_map_and_stop_waiter(mpp, vecs);
/* uevent handler for dm map addition: resolve the map name from DM_NAME
 * or, failing that, from the major:minor pair, then call ev_add_map()
 * under the vectors lock. */
645 uev_add_map (struct uevent * uev, struct vectors * vecs)
648 int major = -1, minor = -1, rc;
650 condlog(3, "%s: add map (uevent)", uev->kernel);
651 alias = uevent_get_dm_name(uev);
653 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
654 major = uevent_get_major(uev);
655 minor = uevent_get_minor(uev);
656 alias = dm_mapname(major, minor);
658 condlog(2, "%s: mapname not found for %d:%d",
659 uev->kernel, major, minor);
663 pthread_cleanup_push(cleanup_lock, &vecs->lock);
665 pthread_testcancel();
666 rc = ev_add_map(uev->kernel, alias, vecs);
667 lock_cleanup_pop(vecs->lock);
673 * ev_add_map expects that the multipath device already exists in kernel
674 * before it is called. It just adds a device to multipathd or updates an
/* For a known map: perform delayed actions (wait_for_udev), possibly
 * trigger a delayed reconfigure, or reassign dm devices. For an unknown
 * map: register it via add_map_without_path(). */
678 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
680 struct multipath * mpp;
681 int delayed_reconfig, reassign_maps;
684 if (!dm_is_mpath(alias)) {
685 condlog(4, "%s: not a multipath map", alias);
689 mpp = find_mp_by_alias(vecs->mpvec, alias);
692 if (mpp->wait_for_udev > 1) {
693 condlog(2, "%s: performing delayed actions",
695 if (update_map(mpp, vecs, 0))
696 /* setup multipathd removed the map */
699 conf = get_multipath_config();
700 delayed_reconfig = conf->delayed_reconfig;
701 reassign_maps = conf->reassign_maps;
702 put_multipath_config(conf);
703 if (mpp->wait_for_udev) {
704 mpp->wait_for_udev = 0;
705 if (delayed_reconfig &&
706 !need_to_delay_reconfig(vecs)) {
707 condlog(2, "reconfigure (delayed)");
708 set_config_state(DAEMON_CONFIGURE);
713 * Not really an error -- we generate our own uevent
714 * if we create a multipath mapped device as a result
718 condlog(3, "%s: Reassign existing device-mapper devices",
724 condlog(2, "%s: adding map", alias);
727 * now we can register the map
729 if ((mpp = add_map_without_path(vecs, alias))) {
731 condlog(2, "%s: devmap %s registered", alias, dev);
734 condlog(2, "%s: ev_add_map failed", dev);
/* uevent handler for dm map removal: look up the map by minor number,
 * sanity-check the alias matches, then orphan its paths and remove it.
 * All vector access happens under the vectors lock. */
740 uev_remove_map (struct uevent * uev, struct vectors * vecs)
744 struct multipath *mpp;
746 condlog(3, "%s: remove map (uevent)", uev->kernel);
747 alias = uevent_get_dm_name(uev);
749 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
752 minor = uevent_get_minor(uev);
754 pthread_cleanup_push(cleanup_lock, &vecs->lock);
756 pthread_testcancel();
757 mpp = find_mp_by_minor(vecs->mpvec, minor);
760 condlog(2, "%s: devmap not registered, can't remove",
/* Alias mismatch means the event refers to a different (stale) map. */
764 if (strcmp(mpp->alias, alias)) {
765 condlog(2, "%s: minor number mismatch (map %d, event %d)",
766 mpp->alias, mpp->dmi->minor, minor);
770 orphan_paths(vecs->pathvec, mpp);
771 remove_map_and_stop_waiter(mpp, vecs);
773 lock_cleanup_pop(vecs->lock);
778 /* Called from CLI handler */
/* Remove a map by minor number after verifying the alias matches,
 * delegating the actual teardown to flush_map(). Caller holds the
 * vectors lock. */
780 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
782 struct multipath * mpp;
784 mpp = find_mp_by_minor(vecs->mpvec, minor);
787 condlog(2, "%s: devmap not registered, can't remove",
791 if (strcmp(mpp->alias, alias)) {
792 condlog(2, "%s: minor number mismatch (map %d, event %d)",
793 mpp->alias, mpp->dmi->minor, minor);
796 return flush_map(mpp, vecs, 0);
/* uevent handler for path addition. If the path is already known,
 * either treat the event as spurious or (for an orphan with no wwid)
 * reinitialize it; otherwise allocate a new struct path via pathinfo,
 * store it in pathvec, and hand it to ev_add_path(). Blacklisted paths
 * are dropped. All pathvec access is under the vectors lock. */
800 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
806 condlog(3, "%s: add path (uevent)", uev->kernel);
807 if (strstr(uev->kernel, "..") != NULL) {
809 * Don't allow relative device names in the pathvec
811 condlog(0, "%s: path name is invalid", uev->kernel);
815 pthread_cleanup_push(cleanup_lock, &vecs->lock);
817 pthread_testcancel();
818 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
822 condlog(3, "%s: spurious uevent, path already in pathvec",
/* Orphan with empty wwid: re-probe it with the new udev device. */
824 if (!pp->mpp && !strlen(pp->wwid)) {
825 condlog(3, "%s: reinitialize path", uev->kernel);
826 udev_device_unref(pp->udev);
827 pp->udev = udev_device_ref(uev->udev);
828 conf = get_multipath_config();
829 pthread_cleanup_push(put_multipath_config, conf);
830 r = pathinfo(pp, conf,
831 DI_ALL | DI_BLACKLIST);
832 pthread_cleanup_pop(1);
833 if (r == PATHINFO_OK)
834 ret = ev_add_path(pp, vecs, need_do_map);
835 else if (r == PATHINFO_SKIPPED) {
836 condlog(3, "%s: remove blacklisted path",
838 i = find_slot(vecs->pathvec, (void *)pp);
840 vector_del_slot(vecs->pathvec, i);
843 condlog(0, "%s: failed to reinitialize path",
849 lock_cleanup_pop(vecs->lock);
854 * get path vital state
856 conf = get_multipath_config();
857 pthread_cleanup_push(put_multipath_config, conf);
858 ret = alloc_path_with_pathinfo(conf, uev->udev,
859 uev->wwid, DI_ALL, &pp);
860 pthread_cleanup_pop(1);
862 if (ret == PATHINFO_SKIPPED)
864 condlog(3, "%s: failed to get path info", uev->kernel);
867 pthread_cleanup_push(cleanup_lock, &vecs->lock);
869 pthread_testcancel();
870 ret = store_path(vecs->pathvec, pp);
872 conf = get_multipath_config();
873 pp->checkint = conf->checkint;
874 put_multipath_config(conf);
875 ret = ev_add_path(pp, vecs, need_do_map);
877 condlog(0, "%s: failed to store path info, "
883 lock_cleanup_pop(vecs->lock);
/* Core path-addition logic: attach pp to an existing map (checking for
 * size mismatch and wait_for_udev deferral) or create a new map for it,
 * then build the DM table, push it with domap() (with retries for
 * asynchronous-uevent races), start event monitoring, and resync state.
 * On failure either the path is orphaned or the map is removed -- see
 * the goto targets at the end. Caller holds the vectors lock. */
893 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
895 struct multipath * mpp;
896 char params[PARAMS_SIZE] = {0};
898 int start_waiter = 0;
902 * need path UID to go any further
904 if (strlen(pp->wwid) == 0) {
905 condlog(0, "%s: failed to get path uid", pp->dev);
906 goto fail; /* leave path added to pathvec */
908 mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
/* Map still settling after creation: defer this addition until the
 * creation uevent completes (wait_for_udev == 2 marks pending work). */
909 if (mpp && mpp->wait_for_udev &&
910 (pathcount(mpp, PATH_UP) > 0 ||
911 (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
912 mpp->ghost_delay_tick <= 0))) {
913 /* if wait_for_udev is set and valid paths exist */
914 condlog(3, "%s: delaying path addition until %s is fully initialized",
915 pp->dev, mpp->alias);
916 mpp->wait_for_udev = 2;
917 orphan_path(pp, "waiting for create to complete");
/* A path whose size differs from the map cannot be added. */
924 if (pp->size && mpp->size != pp->size) {
925 condlog(0, "%s: failed to add new path %s, "
926 "device size mismatch",
927 mpp->alias, pp->dev);
928 int i = find_slot(vecs->pathvec, (void *)pp);
930 vector_del_slot(vecs->pathvec, i);
935 condlog(4,"%s: adopting all paths for path %s",
936 mpp->alias, pp->dev);
937 if (adopt_paths(vecs->pathvec, mpp))
938 goto fail; /* leave path added to pathvec */
940 verify_paths(mpp, vecs);
941 mpp->action = ACT_RELOAD;
942 extract_hwe_from_path(mpp);
944 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
945 orphan_path(pp, "only one path");
948 condlog(4,"%s: creating new map", pp->dev);
949 if ((mpp = add_map_with_path(vecs, pp, 1))) {
950 mpp->action = ACT_CREATE;
952 * We don't depend on ACT_CREATE, as domap will
953 * set it to ACT_NOTHING when complete.
958 goto fail; /* leave path added to pathvec */
961 /* persistent reservation check*/
962 mpath_pr_event_handle(pp);
967 if (!dm_map_present(mpp->alias)) {
968 mpp->action = ACT_CREATE;
972 * push the map to the device-mapper
974 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
975 condlog(0, "%s: failed to setup map for addition of new "
976 "path %s", mpp->alias, pp->dev);
980 * reload the map for the multipath mapped device
983 ret = domap(mpp, params, 1);
985 if (ret < 0 && retries-- > 0) {
986 condlog(0, "%s: retry domap for addition of new "
987 "path %s", mpp->alias, pp->dev);
991 condlog(0, "%s: failed in domap for addition of new "
992 "path %s", mpp->alias, pp->dev);
994 * deal with asynchronous uevents :((
996 if (mpp->action == ACT_RELOAD && retries-- > 0) {
997 condlog(0, "%s: ev_add_path sleep", mpp->alias);
999 update_mpp_paths(mpp, vecs->pathvec);
1002 else if (mpp->action == ACT_RELOAD)
1003 condlog(0, "%s: giving up reload", mpp->alias);
1009 if ((mpp->action == ACT_CREATE ||
1010 (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1011 wait_for_events(mpp, vecs))
1015 * update our state from kernel regardless of create or reload
1017 if (setup_multipath(vecs, mpp))
1018 goto fail; /* if setup_multipath fails, it removes the map */
1020 sync_map_state(mpp);
1023 condlog(2, "%s [%s]: path added to devmap %s",
1024 pp->dev, pp->dev_t, mpp->alias);
1030 remove_map(mpp, vecs, 1);
1032 orphan_path(pp, "failed to add path");
/* uevent handler for path removal: drop any foreign-library record for
 * the device, then remove the path (if known) under the vectors lock.
 * An unknown path is not an error -- it may have been purged earlier. */
1037 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1042 condlog(3, "%s: remove path (uevent)", uev->kernel);
1043 delete_foreign(uev->udev);
1045 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1047 pthread_testcancel();
1048 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1050 ret = ev_remove_path(pp, vecs, need_do_map);
1051 lock_cleanup_pop(vecs->lock);
1053 /* Not an error; path might have been purged earlier */
1054 condlog(0, "%s: path already removed", uev->kernel);
/* Core path-removal logic: detach pp from its map, and either flush the
 * map entirely when this was its last path (disabling queueing first if
 * flush_on_last_del is set), or rebuild and reload the DM table without
 * the path. Finally the path is dropped from pathvec. Caller holds the
 * vectors lock. */
1061 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1063 struct multipath * mpp;
1065 char params[PARAMS_SIZE] = {0};
1068 * avoid referring to the map of an orphaned path
1070 if ((mpp = pp->mpp)) {
1072 * transform the mp->pg vector of vectors of paths
1073 * into a mp->params string to feed the device-mapper
1075 if (update_mpp_paths(mpp, vecs->pathvec)) {
1076 condlog(0, "%s: failed to update paths",
1082 * Make sure mpp->hwe doesn't point to freed memory
1083 * We call extract_hwe_from_path() below to restore mpp->hwe
1085 if (mpp->hwe == pp->hwe)
1088 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1089 vector_del_slot(mpp->paths, i);
1092 * remove the map IF removing the last path
1094 if (VECTOR_SIZE(mpp->paths) == 0) {
1095 char alias[WWID_SIZE];
1098 * flush_map will fail if the device is open
/* NOTE(review): strncpy may leave alias unterminated if mpp->alias is
 * WWID_SIZE bytes or longer -- worth confirming against full source. */
1100 strncpy(alias, mpp->alias, WWID_SIZE);
1101 if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1102 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1103 mpp->retry_tick = 0;
1104 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1105 mpp->disable_queueing = 1;
1106 mpp->stat_map_failures++;
1107 dm_queue_if_no_path(mpp->alias, 0);
1109 if (!flush_map(mpp, vecs, 1)) {
1110 condlog(2, "%s: removed map after"
1111 " removing all paths",
1117 * Not an error, continue
1121 if (mpp->hwe == NULL)
1122 extract_hwe_from_path(mpp);
1124 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1125 condlog(0, "%s: failed to setup map for"
1126 " removal of path %s", mpp->alias, pp->dev);
1130 if (mpp->wait_for_udev) {
1131 mpp->wait_for_udev = 2;
1140 mpp->action = ACT_RELOAD;
1141 if (domap(mpp, params, 1) <= 0) {
1142 condlog(0, "%s: failed in domap for "
1143 "removal of path %s",
1144 mpp->alias, pp->dev);
1148 * update our state from kernel
1150 if (setup_multipath(vecs, mpp))
1152 sync_map_state(mpp);
1154 condlog(2, "%s [%s]: path removed from map %s",
1155 pp->dev, pp->dev_t, mpp->alias);
1160 if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1161 vector_del_slot(vecs->pathvec, i);
1168 remove_map_and_stop_waiter(mpp, vecs);
/* uevent handler for path "change" events: hand foreign paths to the
 * foreign library, detect wwid changes (failing the path when
 * disable_changed_wwids is set), refresh udev/sysfs info, reload the map
 * on a read-only flip, and treat an unknown non-blacklisted device as a
 * late path addition. */
1173 uev_update_path (struct uevent *uev, struct vectors * vecs)
1175 int ro, retval = 0, rc;
1177 struct config *conf;
1178 int disable_changed_wwids;
1179 int needs_reinit = 0;
1181 switch ((rc = change_foreign(uev->udev))) {
1183 /* known foreign path, ignore event */
1185 case FOREIGN_IGNORED:
1188 condlog(3, "%s: error in change_foreign", __func__);
/* NOTE(review): "change_forein" below looks like a typo for
 * "change_foreign" in the log text; untouched (runtime string). */
1191 condlog(1, "%s: return code %d of change_forein is unsupported",
1196 conf = get_multipath_config();
1197 disable_changed_wwids = conf->disable_changed_wwids;
1198 put_multipath_config(conf);
1200 ro = uevent_get_disk_ro(uev);
1202 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1204 pthread_testcancel();
1206 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1208 struct multipath *mpp = pp->mpp;
1209 char wwid[WWID_SIZE];
/* Snapshot the old wwid, re-read the uid, and compare. */
1211 strcpy(wwid, pp->wwid);
1212 get_uid(pp, pp->state, uev->udev);
1214 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1215 condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1216 uev->kernel, wwid, pp->wwid,
1217 (disable_changed_wwids ? "disallowing" :
1219 if (disable_changed_wwids &&
1220 (strlen(wwid) || pp->wwid_changed)) {
1221 strcpy(pp->wwid, wwid);
1222 if (!pp->wwid_changed) {
1223 pp->wwid_changed = 1;
/* Fail the path in the kernel: its identity is no longer trusted. */
1226 dm_fail_path(pp->mpp->alias, pp->dev_t);
1229 } else if (!disable_changed_wwids)
1230 strcpy(pp->wwid, wwid);
1232 pp->wwid_changed = 0;
1234 udev_device_unref(pp->udev);
1235 pp->udev = udev_device_ref(uev->udev);
1236 conf = get_multipath_config();
1237 pthread_cleanup_push(put_multipath_config, conf);
1238 if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1239 condlog(1, "%s: pathinfo failed after change uevent",
1241 pthread_cleanup_pop(1);
1244 if (pp->initialized == INIT_REQUESTED_UDEV)
/* Read-only attribute changed: reload the map with force_readonly. */
1246 else if (mpp && ro >= 0) {
1247 condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1249 if (mpp->wait_for_udev)
1250 mpp->wait_for_udev = 2;
1253 pp->mpp->force_readonly = 1;
1254 retval = reload_map(vecs, mpp, 0, 1);
1255 pp->mpp->force_readonly = 0;
1256 condlog(2, "%s: map %s reloaded (retval %d)",
1257 uev->kernel, mpp->alias, retval);
1262 lock_cleanup_pop(vecs->lock);
1264 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1266 int flag = DI_SYSFS | DI_WWID;
1268 conf = get_multipath_config();
1269 pthread_cleanup_push(put_multipath_config, conf);
1270 retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1271 pthread_cleanup_pop(1);
1273 if (retval == PATHINFO_SKIPPED) {
1274 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1279 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
/* Unknown but not blacklisted: treat the change event as an add. */
1282 retval = uev_add_path(uev, vecs, 1);
/* On a dm PATH_FAILED uevent, feed the failing path (looked up by its
 * DM_PATH dev_t) into the io_err_stat accounting machinery. */
1287 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1289 char *action = NULL, *devt = NULL;
1293 action = uevent_get_dm_action(uev);
1296 if (strncmp(action, "PATH_FAILED", 11))
1298 devt = uevent_get_dm_path(uev);
1300 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1304 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1306 pthread_testcancel();
1307 pp = find_path_by_devt(vecs->pathvec, devt);
1310 r = io_err_stat_handle_pathfail(pp);
1312 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1315 lock_cleanup_pop(vecs->lock);
/* Populate mpvec from the kernel's existing dm maps, removing any map
 * whose table or status cannot be read. */
1325 map_discovery (struct vectors * vecs)
1327 struct multipath * mpp;
1330 if (dm_get_maps(vecs->mpvec))
1333 vector_foreach_slot (vecs->mpvec, mpp, i)
1334 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1335 update_multipath_status(mpp)) {
1336 remove_map(mpp, vecs, 1);
/* Unix-socket command dispatcher: non-root clients may only run
 * "list"/"show" commands; everything else goes to parse_cmd(), whose
 * result is mapped to a reply string ("timeout"/"fail"/"ok").
 * NOTE(review): "permission deny" in the reply string is a runtime
 * string and is left untouched here despite the grammar. */
1344 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1345 void * trigger_data)
1347 struct vectors * vecs;
1352 vecs = (struct vectors *)trigger_data;
1354 if ((str != NULL) && (is_root == false) &&
1355 (strncmp(str, "list", strlen("list")) != 0) &&
1356 (strncmp(str, "show", strlen("show")) != 0)) {
1357 *reply = STRDUP("permission deny: need to be root");
1359 *len = strlen(*reply) + 1;
1363 r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1367 *reply = STRDUP("timeout\n");
1369 *reply = STRDUP("fail\n");
1371 *len = strlen(*reply) + 1;
1374 else if (!r && *len == 0) {
1375 *reply = STRDUP("ok\n");
1377 *len = strlen(*reply) + 1;
1380 /* else if (r < 0) leave *reply alone */
/* Central uevent dispatcher: blocks until the daemon is IDLE/RUNNING,
 * then routes dm-* events (map add/change/remove, pathfail accounting,
 * foreign devices) and block-device events (path add/remove/change,
 * including merged events) to the handlers above. */
1386 uev_trigger (struct uevent * uev, void * trigger_data)
1389 struct vectors * vecs;
1390 struct uevent *merge_uev, *tmp;
1392 vecs = (struct vectors *)trigger_data;
/* Wait for a processable daemon state; cancellation-safe. */
1394 pthread_cleanup_push(config_cleanup, NULL);
1395 pthread_mutex_lock(&config_lock);
1396 if (running_state != DAEMON_IDLE &&
1397 running_state != DAEMON_RUNNING)
1398 pthread_cond_wait(&config_cond, &config_lock);
1399 pthread_cleanup_pop(1);
1401 if (running_state == DAEMON_SHUTDOWN)
1406 * Add events are ignored here as the tables
1407 * are not fully initialised then.
1409 if (!strncmp(uev->kernel, "dm-", 3)) {
1410 if (!uevent_is_mpath(uev)) {
1411 if (!strncmp(uev->action, "change", 6))
1412 (void)add_foreign(uev->udev);
1413 else if (!strncmp(uev->action, "remove", 6))
1414 (void)delete_foreign(uev->udev);
1417 if (!strncmp(uev->action, "change", 6)) {
1418 r = uev_add_map(uev, vecs);
1421 * the kernel-side dm-mpath issues a PATH_FAILED event
1422 * when it encounters a path IO error. It is reason-
1423 * able be the entry of path IO error accounting pro-
1426 uev_pathfail_check(uev, vecs);
1427 } else if (!strncmp(uev->action, "remove", 6)) {
1428 r = uev_remove_map(uev, vecs);
1434 * path add/remove/change event, add/remove maybe merged
1436 list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1437 if (!strncmp(merge_uev->action, "add", 3))
1438 r += uev_add_path(merge_uev, vecs, 0);
1439 if (!strncmp(merge_uev->action, "remove", 6))
1440 r += uev_remove_path(merge_uev, vecs, 0);
1443 if (!strncmp(uev->action, "add", 3))
1444 r += uev_add_path(uev, vecs, 1);
1445 if (!strncmp(uev->action, "remove", 6))
1446 r += uev_remove_path(uev, vecs, 1);
1447 if (!strncmp(uev->action, "change", 6))
1448 r += uev_update_path(uev, vecs);
/* pthread cleanup handler: deregister the exiting thread from RCU. */
1454 static void rcu_unregister(void *param)
1456 rcu_unregister_thread();
/* Thread entry: register with RCU and run the udev uevent listener for
 * the lifetime of the daemon. */
1460 ueventloop (void * ap)
1462 struct udev *udev = ap;
1464 pthread_cleanup_push(rcu_unregister, NULL);
1465 rcu_register_thread();
1466 if (uevent_listen(udev))
1467 condlog(0, "error starting uevent listener");
1468 pthread_cleanup_pop(1);
/* Thread entry: register with RCU and drain the queued uevents through
 * uev_trigger(). */
1473 uevqloop (void * ap)
1475 pthread_cleanup_push(rcu_unregister, NULL);
1476 rcu_register_thread();
1477 if (uevent_dispatch(&uev_trigger, ap))
1478 condlog(0, "error starting uevent dispatcher");
1479 pthread_cleanup_pop(1);
/* Thread entry for the CLI unix-socket listener: register every CLI
 * command handler (handlers marked "unlocked" run without the vectors
 * lock), then serve commands via uxsock_listen()/uxsock_trigger(). */
1483 uxlsnrloop (void * ap)
1486 condlog(1, "Failed to init uxsock listener");
1489 pthread_cleanup_push(rcu_unregister, NULL);
1490 rcu_register_thread();
1491 set_handler_callback(LIST+PATHS, cli_list_paths);
1492 set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1493 set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1494 set_handler_callback(LIST+PATH, cli_list_path);
1495 set_handler_callback(LIST+MAPS, cli_list_maps);
1496 set_handler_callback(LIST+STATUS, cli_list_status);
1497 set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1498 set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1499 set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1500 set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1501 set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1502 set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1503 set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1504 set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1505 set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1506 set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1507 set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1508 set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1509 set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1510 set_handler_callback(LIST+CONFIG, cli_list_config);
1511 set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1512 set_handler_callback(LIST+DEVICES, cli_list_devices);
1513 set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1514 set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1515 set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1516 set_handler_callback(ADD+PATH, cli_add_path);
1517 set_handler_callback(DEL+PATH, cli_del_path);
1518 set_handler_callback(ADD+MAP, cli_add_map);
1519 set_handler_callback(DEL+MAP, cli_del_map);
1520 set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1521 set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1522 set_handler_callback(SUSPEND+MAP, cli_suspend);
1523 set_handler_callback(RESUME+MAP, cli_resume);
1524 set_handler_callback(RESIZE+MAP, cli_resize);
1525 set_handler_callback(RELOAD+MAP, cli_reload);
1526 set_handler_callback(RESET+MAP, cli_reassign);
1527 set_handler_callback(REINSTATE+PATH, cli_reinstate);
1528 set_handler_callback(FAIL+PATH, cli_fail);
1529 set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1530 set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1531 set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1532 set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1533 set_unlocked_handler_callback(QUIT, cli_quit);
1534 set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1535 set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1536 set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1537 set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1538 set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1539 set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1540 set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1541 set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1542 set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1545 uxsock_listen(&uxsock_trigger, ap);
1546 pthread_cleanup_pop(1);
/* Fragment of the daemon shutdown helper (its signature is not visible
 * in this chunk): requests the SHUTDOWN state. */
1553 post_config_state(DAEMON_SHUTDOWN);
/* Checker callback: fail the path in the kernel and, when del_active is
 * set, account its removal from the active-path queue mode. */
1557 fail_path (struct path * pp, int del_active)
1562 condlog(2, "checker failed path %s in map %s",
1563 pp->dev_t, pp->mpp->alias);
1565 dm_fail_path(pp->mpp->alias, pp->dev_t);
1567 update_queue_mode_del_path(pp->mpp);
1571 * caller must have locked the path list before calling that function
/* Reinstate the path in the kernel and, when add_active is set, account
 * its return to the active-path queue mode. */
1574 reinstate_path (struct path * pp, int add_active)
1581 if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1582 condlog(0, "%s: reinstate failed", pp->dev_t);
1585 condlog(2, "%s: reinstated", pp->dev_t);
1587 update_queue_mode_add_path(pp->mpp);
/* Re-enable the pathgroup containing pp if device-mapper has disabled
 * it. pgindex is 1-based; 0 means not yet assigned (safe to skip --
 * the next map reload enables all groups anyway). */
1593 enable_group(struct path * pp)
1595 struct pathgroup * pgp;
1598 * if path is added through uev_add_path, pgindex can be unset.
1599 * next update_strings() will set it, upon map reload event.
1601 * we can safely return here, because upon map reload, all
1602 * PG will be enabled.
1604 if (!pp->mpp->pg || !pp->pgindex)
1607 pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1609 if (pgp->status == PGSTATE_DISABLED) {
1610 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1611 dm_enablegroup(pp->mpp->alias, pp->pgindex);
/* Drop any map from mpvec whose dm device no longer exists in the
 * kernel ("dead" maps). */
1616 mpvec_garbage_collector (struct vectors * vecs)
1618 struct multipath * mpp;
1624 vector_foreach_slot (vecs->mpvec, mpp, i) {
1625 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1626 condlog(2, "%s: remove dead map", mpp->alias);
1627 remove_map_and_stop_waiter(mpp, vecs);
/*
 * Decide whether a "followover" failback should happen for the map of
 * path 'pp'. Per the original comment: only when the map uses the
 * followover failback mode, this pathgroup is the best one, and this
 * is the first path in the group to come back up.
 */
1633 /* This is called after a path has started working again. It the multipath
1634  * device for this path uses the followover failback type, and this is the
1635  * best pathgroup, and this is the first path in the pathgroup to come back
1636  * up, then switch to this pathgroup */
1638 followover_should_failback(struct path * pp)
1640 	struct pathgroup * pgp;
1644 	if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1645 	    !pp->mpp->pg || !pp->pgindex ||
1646 	    pp->pgindex != pp->mpp->bestpg)
1649 	pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1650 	vector_foreach_slot(pgp->paths, pp1, i) {
1653 		if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
/*
 * Per-tick countdown for maps still waiting on their creation uevent.
 * When a map's uev_wait_tick expires, reloads are re-enabled (and the
 * map may be updated/removed). If any map timed out and a reconfigure
 * was deferred (delayed_reconfig), trigger it now.
 */
1660 missing_uev_wait_tick(struct vectors *vecs)
1662 	struct multipath * mpp;
1664 	int timed_out = 0, delayed_reconfig;
1665 	struct config *conf;
1667 	vector_foreach_slot (vecs->mpvec, mpp, i) {
1668 		if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1670 			condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1671 			if (mpp->wait_for_udev > 1 &&
1672 			    update_map(mpp, vecs, 0)) {
1673 				/* update_map removed map */
1677 			mpp->wait_for_udev = 0;
1681 	conf = get_multipath_config();
1682 	delayed_reconfig = conf->delayed_reconfig;
1683 	put_multipath_config(conf);
1684 	if (timed_out && delayed_reconfig &&
1685 	    !need_to_delay_reconfig(vecs)) {
1686 		condlog(2, "reconfigure (delayed)");
1687 		set_config_state(DAEMON_CONFIGURE);
/*
 * Per-tick countdown for maps waiting on an active (non-ghost) path.
 * When the delay expires, force a udev reload of the map; update_map()
 * returning non-zero means it removed the map.
 */
1692 ghost_delay_tick(struct vectors *vecs)
1694 	struct multipath * mpp;
1697 	vector_foreach_slot (vecs->mpvec, mpp, i) {
1698 		if (mpp->ghost_delay_tick <= 0)
1700 		if (--mpp->ghost_delay_tick <= 0) {
1701 			condlog(0, "%s: timed out waiting for active path",
1703 			mpp->force_udev_reload = 1;
1704 			if (update_map(mpp, vecs, 0) != 0) {
1705 				/* update_map removed map */
/*
 * Per-tick countdown of deferred failbacks: when a map's failback_tick
 * reaches zero and a better pathgroup exists, switch to it.
 */
1714 defered_failback_tick (vector mpvec)
1716 	struct multipath * mpp;
1719 	vector_foreach_slot (mpvec, mpp, i) {
1721 		 * deferred failback getting sooner
1723 		if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1724 			mpp->failback_tick--;
1726 			if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1727 				switch_pathgroup(mpp);
/*
 * Per-tick countdown for maps queueing I/O with no active path.
 * When retry_tick expires, disable queue_if_no_path so queued I/O
 * errors out, and account the map failure in the statistics.
 */
1733 retry_count_tick(vector mpvec)
1735 	struct multipath *mpp;
1738 	vector_foreach_slot (mpvec, mpp, i) {
1739 		if (mpp->retry_tick > 0) {
1740 			mpp->stat_total_queueing_time++;
1741 			condlog(4, "%s: Retrying.. No active path", mpp->alias);
1742 			if(--mpp->retry_tick == 0) {
1743 				mpp->stat_map_failures++;
1744 				dm_queue_if_no_path(mpp->alias, 0);
1745 				condlog(2, "%s: Disable queueing", mpp->alias);
/*
 * Refresh path priorities. With refresh_all set, re-run pathinfo
 * (DI_PRIO) on every path of every pathgroup in pp's map; otherwise
 * only on 'pp' itself (skipped if the path is down). Returns whether
 * any priority changed (return statements are elided in this excerpt).
 */
1751 int update_prio(struct path *pp, int refresh_all)
1755 	struct pathgroup * pgp;
1756 	int i, j, changed = 0;
1757 	struct config *conf;
1760 	vector_foreach_slot (pp->mpp->pg, pgp, i) {
1761 		vector_foreach_slot (pgp->paths, pp1, j) {
1762 			oldpriority = pp1->priority;
1763 			conf = get_multipath_config();
1764 			pthread_cleanup_push(put_multipath_config,
1766 			pathinfo(pp1, conf, DI_PRIO);
1767 			pthread_cleanup_pop(1);
1768 			if (pp1->priority != oldpriority)
1774 	oldpriority = pp->priority;
1775 	conf = get_multipath_config();
1776 	pthread_cleanup_push(put_multipath_config, conf);
1777 	if (pp->state != PATH_DOWN)
1778 		pathinfo(pp, conf, DI_PRIO);
1779 	pthread_cleanup_pop(1);
1781 	if (pp->priority == oldpriority)
/*
 * Reload the map's pathgroup layout in device-mapper, then refresh the
 * in-memory map structure and sync its state back to the kernel.
 */
1786 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1788 	if (reload_map(vecs, mpp, refresh, 1))
1792 	if (setup_multipath(vecs, mpp) != 0)
1794 	sync_map_state(mpp);
/*
 * Core per-path checker routine, run from checkerloop for each path.
 * Decrements the path's tick, runs the configured checker, reconciles
 * the new state with device-mapper, and handles failback/priority
 * consequences. Per the comment below: returns 1 when the path was
 * checked, -1 when it was blacklisted (other returns elided here).
 */
1800  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1804 check_path (struct vectors * vecs, struct path * pp, int ticks)
1807 	int new_path_up = 0;
1808 	int chkr_new_path_up = 0;
1810 	int disable_reinstate = 0;
1811 	int oldchkrstate = pp->chkrstate;
1812 	int retrigger_tries, checkint;
1813 	struct config *conf;
1816 	if ((pp->initialized == INIT_OK ||
1817 	     pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
	/* decrement this path's tick; not yet due for a check => return */
1821 	pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1823 		return 0; /* don't check this path yet */
1825 	conf = get_multipath_config();
1826 	retrigger_tries = conf->retrigger_tries;
1827 	checkint = conf->checkint;
1828 	put_multipath_config(conf);
	/* paths that never got their udev info: poke udev again (bounded
	 * by retrigger_tries) via a synthetic "change" uevent */
1829 	if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1830 	    pp->retriggers < retrigger_tries) {
1831 		condlog(2, "%s: triggering change event to reinitialize",
1833 		pp->initialized = INIT_REQUESTED_UDEV;
1835 		sysfs_attr_set_value(pp->udev, "uevent", "change",
1841 	 * provision a next check soonest,
1842 	 * in case we exit abnormaly from here
1844 	pp->tick = checkint;
1846 	newstate = path_offline(pp);
1848 	 * Wait for uevent for removed paths;
1849 	 * some LLDDs like zfcp keep paths unavailable
1850 	 * without sending uevents.
1852 	if (newstate == PATH_REMOVED)
1853 		newstate = PATH_DOWN;
	/* only run the (possibly expensive) checker if sysfs says online */
1855 	if (newstate == PATH_UP) {
1856 		conf = get_multipath_config();
1857 		pthread_cleanup_push(put_multipath_config, conf);
1858 		newstate = get_state(pp, conf, 1, newstate);
1859 		pthread_cleanup_pop(1);
1861 		checker_clear_message(&pp->checker);
1863 	if (pp->wwid_changed) {
1864 		condlog(2, "%s: path wwid has changed. Refusing to use",
1866 		newstate = PATH_DOWN;
1869 	if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1870 		condlog(2, "%s: unusable path", pp->dev);
1871 		conf = get_multipath_config();
1872 		pthread_cleanup_push(put_multipath_config, conf);
1873 		pathinfo(pp, conf, 0);
1874 		pthread_cleanup_pop(1);
	/* path came up but has no wwid yet: gather full pathinfo and add
	 * it as a new path (unless blacklisted => PATHINFO_SKIPPED) */
1878 	if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1879 	    (newstate == PATH_UP || newstate == PATH_GHOST)) {
1880 		condlog(2, "%s: add missing path", pp->dev);
1881 		conf = get_multipath_config();
1882 		pthread_cleanup_push(put_multipath_config, conf);
1883 		ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1884 		pthread_cleanup_pop(1);
1885 		if (ret == PATHINFO_OK) {
1886 			ev_add_path(pp, vecs, 1);
1888 		} else if (ret == PATHINFO_SKIPPED)
1894 	 * Async IO in flight. Keep the previous path state
1895 	 * and reschedule as soon as possible
1897 	if (newstate == PATH_PENDING) {
1902 	 * Synchronize with kernel state
1904 	if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1905 		condlog(1, "%s: Could not synchronize with kernel state",
1907 		pp->dmstate = PSTATE_UNDEF;
1909 	/* if update_multipath_strings orphaned the path, quit early */
	/* marginal-path handling: hold the path in SHAKY until the
	 * io_err_stat recheck window has passed */
1913 	if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1914 		pp->state = PATH_SHAKY;
1916 	 * to reschedule as soon as possible,so that this path can
1917 	 * be recoverd in time
	/* delay_wait_checks: defer reinstating a recovered path while the
	 * map still has other active paths */
1923 	if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1924 	    pp->wait_checks > 0) {
1925 		if (pp->mpp->nr_active > 0) {
1926 			pp->state = PATH_DELAYED;
1930 		pp->wait_checks = 0;
1934 	 * don't reinstate failed path, if its in stand-by
1935 	 * and if target supports only implicit tpgs mode.
1936 	 * this will prevent unnecessary i/o by dm on stand-by
1937 	 * paths if there are no other active paths in map.
1939 	disable_reinstate = (newstate == PATH_GHOST &&
1940 			    pp->mpp->nr_active == 0 &&
1941 			    pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1943 	pp->chkrstate = newstate;
	/* ---- state transition: checker state differs from stored state */
1944 	if (newstate != pp->state) {
1945 		int oldstate = pp->state;
1946 		pp->state = newstate;
1948 		LOG_MSG(1, checker_message(&pp->checker));
1951 		 * upon state change, reset the checkint
1952 		 * to the shortest delay
1954 		conf = get_multipath_config();
1955 		pp->checkint = conf->checkint;
1956 		put_multipath_config(conf);
1958 		if (newstate != PATH_UP && newstate != PATH_GHOST) {
1960 			 * proactively fail path in the DM
1962 			if (oldstate == PATH_UP ||
1963 			    oldstate == PATH_GHOST) {
1965 				if (pp->mpp->delay_wait_checks > 0 &&
1966 				    pp->watch_checks > 0) {
1967 					pp->wait_checks = pp->mpp->delay_wait_checks;
1968 					pp->watch_checks = 0;
1974 			 * cancel scheduled failback
1976 			pp->mpp->failback_tick = 0;
1978 			pp->mpp->stat_path_failures++;
	/* path transitioned up: re-check persistent reservations, then
	 * reinstate the path in device-mapper */
1982 		if(newstate == PATH_UP || newstate == PATH_GHOST){
1983 			if ( pp->mpp && pp->mpp->prflag ){
1985 				 * Check Persistent Reservation.
1987 				condlog(2, "%s: checking persistent reservation "
1988 					"registration", pp->dev);
1989 				mpath_pr_event_handle(pp);
1994 		 * reinstate this path
1996 		if (oldstate != PATH_UP &&
1997 		    oldstate != PATH_GHOST) {
1998 			if (pp->mpp->delay_watch_checks > 0)
1999 				pp->watch_checks = pp->mpp->delay_watch_checks;
2002 		if (pp->watch_checks > 0)
2006 		if (!disable_reinstate && reinstate_path(pp, add_active)) {
2007 			condlog(3, "%s: reload map", pp->dev);
2008 			ev_add_path(pp, vecs, 1);
2014 		if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2015 			chkr_new_path_up = 1;
2018 		 * if at least one path is up in a group, and
2019 		 * the group is disabled, re-enable it
2021 		if (newstate == PATH_UP)
	/* ---- no state change, path (still) usable */
2024 	else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2025 		if ((pp->dmstate == PSTATE_FAILED ||
2026 		    pp->dmstate == PSTATE_UNDEF) &&
2027 		    !disable_reinstate) {
2028 			/* Clear IO errors */
2029 			if (reinstate_path(pp, 0)) {
2030 				condlog(3, "%s: reload map", pp->dev);
2031 				ev_add_path(pp, vecs, 1);
2036 			unsigned int max_checkint;
2037 			LOG_MSG(4, checker_message(&pp->checker));
2038 			conf = get_multipath_config();
2039 			max_checkint = conf->max_checkint;
2040 			put_multipath_config(conf);
2041 			if (pp->checkint != max_checkint) {
2043 				 * double the next check delay.
2044 				 * max at conf->max_checkint
2046 				if (pp->checkint < (max_checkint / 2))
2047 					pp->checkint = 2 * pp->checkint;
2049 					pp->checkint = max_checkint;
2051 				condlog(4, "%s: delay next check %is",
2052 					pp->dev_t, pp->checkint);
2054 			if (pp->watch_checks > 0)
2056 			pp->tick = pp->checkint;
	/* ---- no state change, path (still) down */
2059 	else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2060 		if (pp->dmstate == PSTATE_ACTIVE ||
2061 		    pp->dmstate == PSTATE_UNDEF)
2063 		if (newstate == PATH_DOWN) {
2064 			int log_checker_err;
2066 			conf = get_multipath_config();
2067 			log_checker_err = conf->log_checker_err;
2068 			put_multipath_config(conf);
2069 			if (log_checker_err == LOG_CHKR_ERR_ONCE)
2070 				LOG_MSG(3, checker_message(&pp->checker));
2072 				LOG_MSG(2, checker_message(&pp->checker));
2076 	pp->state = newstate;
2078 	if (pp->mpp->wait_for_udev)
2081 	 * path prio refreshing
2083 	condlog(4, "path prio refresh");
	/* priorities changed: regroup (group_by_prio + immediate failback)
	 * or schedule/perform a pathgroup switch */
2085 	if (update_prio(pp, new_path_up) &&
2086 	    (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2087 	    pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2088 		update_path_groups(pp->mpp, vecs, !new_path_up);
2089 	else if (need_switch_pathgroup(pp->mpp, 0)) {
2090 		if (pp->mpp->pgfailback > 0 &&
2091 		    (new_path_up || pp->mpp->failback_tick <= 0))
2092 			pp->mpp->failback_tick =
2093 				pp->mpp->pgfailback + 1;
2094 		else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2095 			 (chkr_new_path_up && followover_should_failback(pp)))
2096 			switch_pathgroup(pp->mpp);
/*
 * Seed every path's check interval from the configured checkint
 * before the checker loop starts.
 */
2101 static void init_path_check_interval(struct vectors *vecs)
2103 	struct config *conf;
2107 	vector_foreach_slot (vecs->pathvec, pp, i) {
2108 		conf = get_multipath_config();
2109 		pp->checkint = conf->checkint;
2110 		put_multipath_config(conf);
/*
 * Main path-checker thread. Each iteration: compute elapsed ticks
 * since the last pass, pet the systemd watchdog, run check_path() on
 * every path (under vecs->lock), run the per-tick countdown helpers,
 * garbage-collect dead maps, then sleep out the remainder of the
 * second when strict_timing is enabled.
 */
2115 checkerloop (void *ap)
2117 	struct vectors *vecs;
2121 	struct timespec last_time;
2122 	struct config *conf;
2124 	pthread_cleanup_push(rcu_unregister, NULL);
2125 	rcu_register_thread();
2126 	mlockall(MCL_CURRENT | MCL_FUTURE);
2127 	vecs = (struct vectors *)ap;
2128 	condlog(2, "path checkers start up");
2130 	/* Tweak start time for initial path check */
2131 	if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2132 		last_time.tv_sec = 0;
2134 		last_time.tv_sec -= 1;
2137 		struct timespec diff_time, start_time, end_time;
2138 		int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2140 		if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2141 			start_time.tv_sec = 0;
2142 		if (start_time.tv_sec && last_time.tv_sec) {
2143 			timespecsub(&start_time, &last_time, &diff_time);
2144 			condlog(4, "tick (%lu.%06lu secs)",
2145 				diff_time.tv_sec, diff_time.tv_nsec / 1000);
2146 			last_time = start_time;
2147 			ticks = diff_time.tv_sec;
2150 			condlog(4, "tick (%d ticks)", ticks);
2154 			sd_notify(0, "WATCHDOG=1");
2156 		rc = set_config_state(DAEMON_RUNNING);
2157 		if (rc == ETIMEDOUT) {
2158 			condlog(4, "timeout waiting for DAEMON_IDLE");
		/* check all paths; a negative rc deletes the path slot */
2162 		pthread_cleanup_push(cleanup_lock, &vecs->lock);
2164 		pthread_testcancel();
2165 		vector_foreach_slot (vecs->pathvec, pp, i) {
2166 			rc = check_path(vecs, pp, ticks);
2168 				vector_del_slot(vecs->pathvec, i);
2174 		lock_cleanup_pop(vecs->lock);
		/* per-tick countdowns (failback, retries, uevent waits) */
2176 		pthread_cleanup_push(cleanup_lock, &vecs->lock);
2178 		pthread_testcancel();
2179 		defered_failback_tick(vecs->mpvec);
2180 		retry_count_tick(vecs->mpvec);
2181 		missing_uev_wait_tick(vecs);
2182 		ghost_delay_tick(vecs);
2183 		lock_cleanup_pop(vecs->lock);
2188 		pthread_cleanup_push(cleanup_lock, &vecs->lock);
2190 		pthread_testcancel();
2191 		condlog(4, "map garbage collection");
2192 		mpvec_garbage_collector(vecs);
2194 		lock_cleanup_pop(vecs->lock);
		/* measure how long the pass took; warn if it exceeded
		 * max_checkint */
2197 		diff_time.tv_nsec = 0;
2198 		if (start_time.tv_sec &&
2199 		    clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2200 			timespecsub(&end_time, &start_time, &diff_time);
2202 				unsigned int max_checkint;
2204 				condlog(3, "checked %d path%s in %lu.%06lu secs",
2205 					num_paths, num_paths > 1 ? "s" : "",
2207 					diff_time.tv_nsec / 1000);
2208 				conf = get_multipath_config();
2209 				max_checkint = conf->max_checkint;
2210 				put_multipath_config(conf);
2211 				if (diff_time.tv_sec > max_checkint)
2212 					condlog(1, "path checkers took longer "
2213 						"than %lu seconds, consider "
2214 						"increasing max_polling_interval",
2219 		post_config_state(DAEMON_IDLE);
2220 		conf = get_multipath_config();
2221 		strict_timing = conf->strict_timing;
2222 		put_multipath_config(conf);
		/* strict timing: sleep out the rest of the 1-second slot;
		 * on nanosleep failure strict_timing is turned off */
2226 			if (diff_time.tv_nsec) {
2227 				diff_time.tv_sec = 0;
2229 					1000UL * 1000 * 1000 - diff_time.tv_nsec;
2231 				diff_time.tv_sec = 1;
2233 			condlog(3, "waiting for %lu.%06lu secs",
2235 				diff_time.tv_nsec / 1000);
2236 			if (nanosleep(&diff_time, NULL) != 0) {
2237 				condlog(3, "nanosleep failed with error %d",
2239 				conf = get_multipath_config();
2240 				conf->strict_timing = 0;
2241 				put_multipath_config(conf);
2246 	pthread_cleanup_pop(1);
/*
 * Full (re)configuration: discover paths and maps, filter blacklisted
 * paths, coalesce paths into maps and push them into device-mapper,
 * drop maps made obsolete by config changes, then install the new map
 * vector and start event waiters for every map.
 */
2251 configure (struct vectors * vecs)
2253 	struct multipath * mpp;
2257 	struct config *conf;
2258 	static int force_reload = FORCE_RELOAD_WEAK;
2260 	if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2261 		condlog(0, "couldn't allocate path vec in configure");
2265 	if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2266 		condlog(0, "couldn't allocate multipath vec in configure");
2270 	if (!(mpvec = vector_alloc())) {
2271 		condlog(0, "couldn't allocate new maps vec in configure");
2276 	 * probe for current path (from sysfs) and map (from dm) sets
2278 	ret = path_discovery(vecs->pathvec, DI_ALL);
2280 		condlog(0, "configure failed at path discovery");
	/* drop blacklisted paths, seed checkint on the survivors */
2284 	vector_foreach_slot (vecs->pathvec, pp, i){
2285 		conf = get_multipath_config();
2286 		pthread_cleanup_push(put_multipath_config, conf);
2287 		if (filter_path(conf, pp) > 0){
2288 			vector_del_slot(vecs->pathvec, i);
2293 			pp->checkint = conf->checkint;
2294 		pthread_cleanup_pop(1);
2296 	if (map_discovery(vecs)) {
2297 		condlog(0, "configure failed at map discovery");
2302 	 * create new set of maps & push changed ones into dm
2303 	 * In the first call, use FORCE_RELOAD_WEAK to avoid making
2304 	 * superfluous ACT_RELOAD ioctls. Later calls are done
2305 	 * with FORCE_RELOAD_YES.
2307 	ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2308 	if (force_reload == FORCE_RELOAD_WEAK)
2309 		force_reload = FORCE_RELOAD_YES;
2311 		condlog(0, "configure failed while coalescing paths");
2316 	 * may need to remove some maps which are no longer relevant
2317 	 * e.g., due to blacklist changes in conf file
2319 	if (coalesce_maps(vecs, mpvec)) {
2320 		condlog(0, "configure failed while coalescing maps");
2326 	sync_maps_state(mpvec);
2327 	vector_foreach_slot(mpvec, mpp, i){
2328 		if (remember_wwid(mpp->wwid) == 1)
2329 			trigger_paths_udev_change(mpp, true);
2334 	 * purge dm of old maps
2339 	 * save new set of maps formed by considering current path state
2341 	vector_free(vecs->mpvec);
2342 	vecs->mpvec = mpvec;
2345 	 * start dm event waiter threads for these new maps
2347 	vector_foreach_slot(vecs->mpvec, mpp, i) {
2348 		if (wait_for_events(mpp, vecs)) {
2349 			remove_map(mpp, vecs, 1);
2353 		if (setup_multipath(vecs, mpp))
/*
 * A reconfigure must be postponed while any existing map is still
 * waiting for its creation uevent (wait_for_udev set).
 */
2360 need_to_delay_reconfig(struct vectors * vecs)
2362 	struct multipath *mpp;
2365 	if (!VECTOR_SIZE(vecs->mpvec))
2368 	vector_foreach_slot(vecs->mpvec, mpp, i) {
2369 		if (mpp->wait_for_udev)
/* RCU callback: free a struct config once no reader can still hold it. */
2375 void rcu_free_config(struct rcu_head *head)
2377 	struct config *conf = container_of(head, struct config, rcu);
/*
 * Reload multipath.conf and rebuild all state: tear down current maps
 * and paths, apply command-line overrides to the fresh config, then
 * publish it via RCU (the old config is freed through rcu_free_config
 * after a grace period).
 */
2383 reconfigure (struct vectors * vecs)
2385 	struct config * old, *conf;
2387 	conf = load_config(DEFAULT_CONFIGFILE);
2392 	 * free old map and path vectors ... they use old conf state
2394 	if (VECTOR_SIZE(vecs->mpvec))
2395 		remove_maps_and_stop_waiters(vecs);
2397 	free_pathvec(vecs->pathvec, FREE_PATHS);
2398 	vecs->pathvec = NULL;
2399 	delete_all_foreign();
2401 	/* Re-read any timezone changes */
2404 	dm_tgt_version(conf->version, TGT_MPATH);
	/* re-apply overrides given on the daemon command line */
2406 	conf->verbosity = verbosity;
2407 	if (bindings_read_only)
2408 		conf->bindings_read_only = bindings_read_only;
2409 	uxsock_timeout = conf->uxsock_timeout;
2411 	old = rcu_dereference(multipath_conf);
2412 	rcu_assign_pointer(multipath_conf, conf);
2413 	call_rcu(&old->rcu, rcu_free_config);
/* Allocate and initialize the global vectors container and its lock. */
2421 static struct vectors *
2424 	struct vectors * vecs;
2426 	vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2431 	pthread_mutex_init(&vecs->lock.mutex, NULL);
/*
 * sigaction() wrapper with signal()-style semantics: install 'func'
 * as handler for 'signo' and return the previous handler.
 */
2437 signal_set(int signo, void (*func) (int))
2440 	struct sigaction sig;
2441 	struct sigaction osig;
2443 	sig.sa_handler = func;
2444 	sigemptyset(&sig.sa_mask);
2447 	r = sigaction(signo, &sig, &osig);
2452 		return (osig.sa_handler);
/*
 * Act on flags raised by the async signal handlers: exit, trigger a
 * reconfigure, or reset the log. The trailing line is a fragment of a
 * separate SIGUSR2 handler (lines elided between them in this excerpt).
 */
2456 handle_signals(bool nonfatal)
2459 		condlog(2, "exit (signal)");
2466 		condlog(2, "reconfigure (signal)");
2467 		set_config_state(DAEMON_CONFIGURE);
2469 	if (log_reset_sig) {
2470 		condlog(2, "reset log (signal)");
2499 	condlog(3, "SIGUSR2 received");
/*
 * Set up the daemon's signal disposition: block everything except
 * SIGPIPE in this thread (the uxlsnr thread unblocks what it needs)
 * and install the handlers.
 */
2507 	/* block all signals */
2509 	/* SIGPIPE occurs if logging fails */
2510 	sigdelset(&set, SIGPIPE);
2511 	pthread_sigmask(SIG_SETMASK, &set, NULL);
2513 	/* Other signals will be unblocked in the uxlsnr thread */
2514 	signal_set(SIGHUP, sighup);
2515 	signal_set(SIGUSR1, sigusr1);
2516 	signal_set(SIGUSR2, sigusr2);
2517 	signal_set(SIGINT, sigend);
2518 	signal_set(SIGTERM, sigend);
2519 	signal_set(SIGPIPE, sigend);
/*
 * Request SCHED_RR real-time scheduling at priority 99 for the daemon;
 * failure is only logged, not fatal.
 */
2526 	static struct sched_param sched_param = {
2527 		.sched_priority = 99
2530 	res = sched_setscheduler (0, SCHED_RR, &sched_param);
2533 		condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
/*
 * Shield multipathd from the OOM killer by writing the minimum score
 * to /proc/self/oom_score_adj, falling back to the legacy
 * /proc/self/oom_adj interface on older kernels. Skipped when systemd
 * already set OOMScoreAdjust.
 */
#ifdef OOM_SCORE_ADJ_MIN
2542 	char *file = "/proc/self/oom_score_adj";
2543 	int score = OOM_SCORE_ADJ_MIN;
2546 	char *file = "/proc/self/oom_adj";
2547 	int score = OOM_ADJUST_MIN;
2553 	envp = getenv("OOMScoreAdjust");
2555 		condlog(3, "Using systemd provided OOMScoreAdjust");
2559 	if (stat(file, &st) == 0){
2560 		fp = fopen(file, "w");
2562 			condlog(0, "couldn't fopen %s : %s", file,
2566 		fprintf(fp, "%i", score);
2570 	if (errno != ENOENT) {
2571 		condlog(0, "couldn't stat %s : %s", file,
	/* legacy fallback when oom_score_adj does not exist */
2575 #ifdef OOM_ADJUST_MIN
2576 	file = "/proc/self/oom_adj";
2577 	score = OOM_ADJUST_MIN;
2582 	condlog(0, "couldn't adjust oom score");
/*
 * Daemon main routine (runs after fork/daemonize). Initializes
 * logging, config, checkers and prioritizers, resource limits and the
 * systemd watchdog; spawns the worker threads (uevent listener, cli
 * listener, checker loop, uevent dispatcher, optional dmevent
 * waiter); then services DAEMON_CONFIGURE requests until shutdown,
 * and finally tears everything down in reverse order.
 */
2586 child (void * param)
2588 	pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2589 	pthread_attr_t log_attr, misc_attr, uevent_attr;
2590 	struct vectors * vecs;
2591 	struct multipath * mpp;
2594 	unsigned long checkint;
2595 	int startup_done = 0;
2599 	struct config *conf;
2601 	int queue_without_daemon;
2603 	mlockall(MCL_CURRENT | MCL_FUTURE);
2607 	setup_thread_attr(&misc_attr, 64 * 1024, 0);
2608 	setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2609 	setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2610 	setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2613 	setup_thread_attr(&log_attr, 64 * 1024, 0);
2614 	log_thread_start(&log_attr);
2615 	pthread_attr_destroy(&log_attr);
2617 	pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2619 		condlog(1, "failed to create pidfile");
2625 	post_config_state(DAEMON_START);
2627 	condlog(2, "--------start up--------");
2628 	condlog(2, "read " DEFAULT_CONFIGFILE);
2630 	conf = load_config(DEFAULT_CONFIGFILE);
	/* apply command-line overrides and publish the initial config */
2635 	conf->verbosity = verbosity;
2636 	if (bindings_read_only)
2637 		conf->bindings_read_only = bindings_read_only;
2638 	uxsock_timeout = conf->uxsock_timeout;
2639 	rcu_assign_pointer(multipath_conf, conf);
2640 	if (init_checkers(conf->multipath_dir)) {
2641 		condlog(0, "failed to initialize checkers");
2644 	if (init_prio(conf->multipath_dir)) {
2645 		condlog(0, "failed to initialize prioritizers");
2648 	/* Failing this is non-fatal */
2650 	init_foreign(conf->multipath_dir);
2653 		poll_dmevents = dmevent_poll_supported();
2654 	setlogmask(LOG_UPTO(conf->verbosity + 3));
	/* raise the open-fd limit unless systemd already did (LimitNOFILE) */
2656 	envp = getenv("LimitNOFILE");
2659 		condlog(2,"Using systemd provided open fds limit of %s", envp);
2660 	} else if (conf->max_fds) {
2661 		struct rlimit fd_limit;
2663 		if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2664 			condlog(0, "can't get open fds limit: %s",
2666 			fd_limit.rlim_cur = 0;
2667 			fd_limit.rlim_max = 0;
2669 		if (fd_limit.rlim_cur < conf->max_fds) {
2670 			fd_limit.rlim_cur = conf->max_fds;
2671 			if (fd_limit.rlim_max < conf->max_fds)
2672 				fd_limit.rlim_max = conf->max_fds;
2673 			if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2674 				condlog(0, "can't set open fds limit to "
2676 					fd_limit.rlim_cur, fd_limit.rlim_max,
2679 				condlog(3, "set open fds limit to %lu/%lu",
2680 					fd_limit.rlim_cur, fd_limit.rlim_max);
2686 	vecs = gvecs = init_vecs();
	/* systemd watchdog: derive check intervals from WATCHDOG_USEC */
2694 	envp = getenv("WATCHDOG_USEC");
2695 	if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2696 		/* Value is in microseconds */
2697 		conf->max_checkint = checkint / 1000000;
2698 		/* Rescale checkint */
2699 		if (conf->checkint > conf->max_checkint)
2700 			conf->checkint = conf->max_checkint;
2702 			conf->checkint = conf->max_checkint / 4;
2703 		condlog(3, "enabling watchdog, interval %d max %d",
2704 			conf->checkint, conf->max_checkint);
2705 		use_watchdog = conf->checkint;
2709 	 * Startup done, invalidate configuration
2714 	 * Signal start of configuration
2716 	post_config_state(DAEMON_CONFIGURE);
2718 	init_path_check_interval(vecs);
2720 	if (poll_dmevents) {
2721 		if (init_dmevent_waiter(vecs)) {
2722 			condlog(0, "failed to allocate dmevents waiter info");
2725 		if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2726 					 wait_dmevents, NULL))) {
2727 			condlog(0, "failed to create dmevent waiter thread: %d",
2734 	 * Start uevent listener early to catch events
2736 	if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2737 		condlog(0, "failed to create uevent thread: %d", rc);
2740 	pthread_attr_destroy(&uevent_attr);
2741 	if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2742 		condlog(0, "failed to create cli listener: %d", rc);
2749 	if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2750 		condlog(0,"failed to create checker loop thread: %d", rc);
2753 	if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2754 		condlog(0, "failed to create uevent dispatcher: %d", rc);
2757 	pthread_attr_destroy(&misc_attr);
	/* main loop: wait on config_cond, reconfigure when requested
	 * (deferred if maps are still waiting on uevents) */
2759 	while (running_state != DAEMON_SHUTDOWN) {
2760 		pthread_cleanup_push(config_cleanup, NULL);
2761 		pthread_mutex_lock(&config_lock);
2762 		if (running_state != DAEMON_CONFIGURE &&
2763 		    running_state != DAEMON_SHUTDOWN) {
2764 			pthread_cond_wait(&config_cond, &config_lock);
2766 		pthread_cleanup_pop(1);
2767 		if (running_state == DAEMON_CONFIGURE) {
2768 			pthread_cleanup_push(cleanup_lock, &vecs->lock);
2770 			pthread_testcancel();
2771 			if (!need_to_delay_reconfig(vecs)) {
2774 				conf = get_multipath_config();
2775 				conf->delayed_reconfig = 1;
2776 				put_multipath_config(conf);
2778 			lock_cleanup_pop(vecs->lock);
2779 			post_config_state(DAEMON_IDLE);
2781 			if (!startup_done) {
2782 				sd_notify(0, "READY=1");
	/* ---- shutdown: optionally disable queueing, stop waiters,
	 * cancel and join all worker threads, free resources */
2790 	conf = get_multipath_config();
2791 	queue_without_daemon = conf->queue_without_daemon;
2792 	put_multipath_config(conf);
2793 	if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2794 		vector_foreach_slot(vecs->mpvec, mpp, i)
2795 			dm_queue_if_no_path(mpp->alias, 0);
2796 	remove_maps_and_stop_waiters(vecs);
2797 	unlock(&vecs->lock);
2799 	pthread_cancel(check_thr);
2800 	pthread_cancel(uevent_thr);
2801 	pthread_cancel(uxlsnr_thr);
2802 	pthread_cancel(uevq_thr);
2804 		pthread_cancel(dmevent_thr);
2806 	pthread_join(check_thr, NULL);
2807 	pthread_join(uevent_thr, NULL);
2808 	pthread_join(uxlsnr_thr, NULL);
2809 	pthread_join(uevq_thr, NULL);
2811 		pthread_join(dmevent_thr, NULL);
2813 	stop_io_err_stat_thread();
2816 	free_pathvec(vecs->pathvec, FREE_PATHS);
2817 	vecs->pathvec = NULL;
2818 	unlock(&vecs->lock);
2820 	pthread_mutex_destroy(&vecs->lock.mutex);
2828 		cleanup_dmevent_waiter();
2833 	/* We're done here */
2834 	condlog(3, "unlink pidfile");
2835 	unlink(DEFAULT_PIDFILE);
2837 	condlog(2, "--------shut down-------");
2843 	 * Freeing config must be done after condlog() and dm_lib_exit(),
2844 	 * because logging functions like dlog() and dm_write_log()
2845 	 * reference the config.
2847 	conf = rcu_dereference(multipath_conf);
2848 	rcu_assign_pointer(multipath_conf, NULL);
2849 	call_rcu(&conf->rcu, rcu_free_config);
2852 	pthread_attr_destroy(&waiter_attr);
2853 	pthread_attr_destroy(&io_err_stat_attr);
2855 	dbg_free_final(NULL);
2859 	sd_notify(0, "ERRNO=0");
2865 	sd_notify(0, "ERRNO=1");
/*
 * Classic double-fork daemonization: detach from the controlling
 * terminal, chdir to /, and redirect stdin/stdout/stderr to /dev/null.
 * (The enclosing function header is elided in this excerpt.)
 */
2878 	if( (pid = fork()) < 0){
2879 		fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2887 	if ( (pid = fork()) < 0)
2888 		fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2893 		fprintf(stderr, "cannot chdir to '/', continuing\n");
2895 	dev_null_fd = open("/dev/null", O_RDWR);
2896 	if (dev_null_fd < 0){
2897 		fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2902 	close(STDIN_FILENO);
2903 	if (dup(dev_null_fd) < 0) {
2904 		fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2908 	close(STDOUT_FILENO);
2909 	if (dup(dev_null_fd) < 0) {
2910 		fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2914 	close(STDERR_FILENO);
2915 	if (dup(dev_null_fd) < 0) {
2916 		fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2921 	daemon_pid = getpid();
/*
 * Entry point. Parses options; in client modes (-k or trailing
 * command words) forwards the command to a running daemon via the
 * unix socket and exits, otherwise sets up the process environment
 * and runs child() as the daemon body.
 */
2926 main (int argc, char *argv[])
2928 	extern char *optarg;
2933 	struct config *conf;
2935 	ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2936 				   "Manipulated through RCU");
2937 	ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2938 		"Suppress complaints about unprotected running_state reads");
2939 	ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2940 		"Suppress complaints about this scalar variable");
2944 	if (getuid() != 0) {
2945 		fprintf(stderr, "need to be root\n");
2949 	/* make sure we don't lock any path */
2951 		fprintf(stderr, "can't chdir to root directory : %s\n",
2953 	umask(umask(077) | 022);
2955 	pthread_cond_init_mono(&config_cond);
2958 	libmp_udev_set_sync_support(0);
2960 	while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2966 			//debug=1; /* ### comment me out ### */
2969 			if (sizeof(optarg) > sizeof(char *) ||
2970 			    !isdigit(optarg[0]))
2973 			verbosity = atoi(optarg);
	/* -k: interactive client mode — talk to the daemon socket */
2979 			conf = load_config(DEFAULT_CONFIGFILE);
2983 			conf->verbosity = verbosity;
2984 			uxsock_timeout = conf->uxsock_timeout;
2985 			uxclnt(optarg, uxsock_timeout + 100);
2989 			bindings_read_only = 1;
2992 			condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
2998 			fprintf(stderr, "Invalid argument '-%c'\n",
	/* trailing arguments: join them into one command line and send
	 * it to the daemon socket */
3003 	if (optind < argc) {
3008 		conf = load_config(DEFAULT_CONFIGFILE);
3012 		conf->verbosity = verbosity;
3013 		uxsock_timeout = conf->uxsock_timeout;
3014 		memset(cmd, 0x0, CMDSIZE);
3015 		while (optind < argc) {
3016 			if (strchr(argv[optind], ' '))
3017 				c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3019 				c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3022 		c += snprintf(c, s + CMDSIZE - c, "\n");
3023 		uxclnt(s, uxsock_timeout + 100);
3029 	if (!isatty(fileno(stdout)))
3030 		setbuf(stdout, NULL);
3032 	daemon_pid = getpid();
3044 	return (child(NULL));
/*
 * Thread body for persistent-reservation handling on a recovered
 * path: read the registered keys (PR IN, READ KEYS), check whether the
 * map's reservation key is among them, and if so re-register it on
 * this path via PR OUT REGISTER-AND-IGNORE.
 */
3047 void * mpath_pr_event_handler_fn (void * pathp )
3049 	struct multipath * mpp;
3050 	int i, ret, isFound;
3051 	struct path * pp = (struct path *)pathp;
3052 	struct prout_param_descriptor *param;
3053 	struct prin_resp *resp;
3055 	rcu_register_thread();
3058 	resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3060 		condlog(0,"%s Alloc failed for prin response", pp->dev);
3064 	ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3065 	if (ret != MPATH_PR_SUCCESS )
3067 		condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
3071 	condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
3072 			resp->prin_descriptor.prin_readkeys.additional_length );
3074 	if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
3076 		condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3077 		ret = MPATH_PR_SUCCESS;
3080 	condlog(2, "Multipath  reservation_key: 0x%" PRIx64 " ",
3081 		get_be64(mpp->reservation_key));
	/* scan the 8-byte key list for the map's reservation key */
3084 	for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
3086 		condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
3087 		dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
3088 		if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3090 			condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3097 		condlog(0, "%s: Either device not registered or ", pp->dev);
3098 		condlog(0, "host is not authorised for registration. Skip path");
3099 		ret = MPATH_PR_OTHER;
	/* re-register our key on this path (REGISTER AND IGNORE EXISTING) */
3103 	param= malloc(sizeof(struct prout_param_descriptor));
3104 	memset(param, 0 , sizeof(struct prout_param_descriptor));
3105 	param->sa_flags = mpp->sa_flags;
3106 	memcpy(param->sa_key, &mpp->reservation_key, 8);
3107 	param->num_transportid = 0;
3109 	condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3111 	ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3112 	if (ret != MPATH_PR_SUCCESS )
3114 		condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
3122 	rcu_unregister_thread();
3126 int mpath_pr_event_handle(struct path *pp)
3130 pthread_attr_t attr;
3131 struct multipath * mpp;
3135 if (!get_be64(mpp->reservation_key))
3138 pthread_attr_init(&attr);
3139 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3141 rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
3143 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3146 pthread_attr_destroy(&attr);
3147 rc = pthread_join(thread, NULL);