multipath-tools: multipathd/main.c (commit c57aa392f95d91024fae05af2fdab0498dc7c19a)
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69
70 #include "mpath_cmd.h"
71 #include "mpath_persist.h"
72
73 #include "prioritizers/alua_rtpg.h"
74
75 #include "main.h"
76 #include "pidfile.h"
77 #include "uxlsnr.h"
78 #include "uxclnt.h"
79 #include "cli.h"
80 #include "cli_handlers.h"
81 #include "lock.h"
82 #include "waiter.h"
83 #include "dmevents.h"
84 #include "io_err_stat.h"
85 #include "wwids.h"
86 #include "foreign.h"
87 #include "../third-party/valgrind/drd.h"
88
89 #define FILE_NAME_SIZE 256
90 #define CMDSIZE 160
91
92 #define LOG_MSG(lvl, verb, pp)                                  \
93 do {                                                            \
94         if (lvl <= verb) {                                      \
95                 if (pp->offline)                                \
96                         condlog(lvl, "%s: %s - path offline",   \
97                                 pp->mpp->alias, pp->dev);       \
98                 else  {                                         \
99                         const char *__m =                       \
100                                 checker_message(&pp->checker);  \
101                                                                 \
102                         if (strlen(__m))                              \
103                                 condlog(lvl, "%s: %s - %s checker%s", \
104                                         pp->mpp->alias,               \
105                                         pp->dev,                      \
106                                         checker_name(&pp->checker),   \
107                                         __m);                         \
108                 }                                                     \
109         }                                                             \
110 } while(0)
111
112 struct mpath_event_param
113 {
114         char * devname;
115         struct multipath *mpp;
116 };
117
118 int logsink;
119 int uxsock_timeout;
120 int verbosity;
121 int bindings_read_only;
122 int ignore_new_devs;
123 #ifdef NO_DMEVENTS_POLL
124 int poll_dmevents = 0;
125 #else
126 int poll_dmevents = 1;
127 #endif
128 enum daemon_status running_state = DAEMON_INIT;
129 pid_t daemon_pid;
130 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
131 pthread_cond_t config_cond;
132
133 /*
134  * global copy of vecs for use in sig handlers
135  */
136 struct vectors * gvecs;
137
138 struct udev * udev;
139
140 struct config *multipath_conf;
141
142 /* Local variables */
143 static volatile sig_atomic_t exit_sig;
144 static volatile sig_atomic_t reconfig_sig;
145 static volatile sig_atomic_t log_reset_sig;
146
147 const char *
148 daemon_status(void)
149 {
150         switch (running_state) {
151         case DAEMON_INIT:
152                 return "init";
153         case DAEMON_START:
154                 return "startup";
155         case DAEMON_CONFIGURE:
156                 return "configure";
157         case DAEMON_IDLE:
158                 return "idle";
159         case DAEMON_RUNNING:
160                 return "running";
161         case DAEMON_SHUTDOWN:
162                 return "shutdown";
163         }
164         return NULL;
165 }
166
167 /*
168  * I love you too, systemd ...
169  */
170 const char *
171 sd_notify_status(void)
172 {
173         switch (running_state) {
174         case DAEMON_INIT:
175                 return "STATUS=init";
176         case DAEMON_START:
177                 return "STATUS=startup";
178         case DAEMON_CONFIGURE:
179                 return "STATUS=configure";
180         case DAEMON_IDLE:
181         case DAEMON_RUNNING:
182                 return "STATUS=up";
183         case DAEMON_SHUTDOWN:
184                 return "STATUS=shutdown";
185         }
186         return NULL;
187 }
188
189 #ifdef USE_SYSTEMD
190 static void do_sd_notify(enum daemon_status old_state)
191 {
192         /*
193          * Checkerloop switches back and forth between idle and running state.
194          * No need to tell systemd each time.
195          * These notifications cause a lot of overhead on dbus.
196          */
197         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
198             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
199                 return;
200         sd_notify(0, sd_notify_status());
201 }
202 #endif
203
204 static void config_cleanup(void *arg)
205 {
206         pthread_mutex_unlock(&config_lock);
207 }
208
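/*
 * Set the daemon state unconditionally and wake up any threads waiting
 * on config_cond (e.g. set_config_state() or uev_trigger()).
 */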
209 void post_config_state(enum daemon_status state)
210 {
211         pthread_mutex_lock(&config_lock);
212         if (state != running_state) {
213                 enum daemon_status old_state = running_state;
214
215                 running_state = state;
216                 pthread_cond_broadcast(&config_cond);
217 #ifdef USE_SYSTEMD
218                 do_sd_notify(old_state);
219 #endif
220         }
221         pthread_mutex_unlock(&config_lock);
222 }
223
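/*
 * Like post_config_state(), but if the daemon is busy (not DAEMON_IDLE),
 * wait up to one second for a state change before applying the new state.
 * Returns 0 on success or the pthread_cond_timedwait() error code.
 */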
224 int set_config_state(enum daemon_status state)
225 {
226         int rc = 0;
227
228         pthread_cleanup_push(config_cleanup, NULL);
229         pthread_mutex_lock(&config_lock);
230         if (running_state != state) {
231                 enum daemon_status old_state = running_state;
232
233                 if (running_state != DAEMON_IDLE) {
234                         struct timespec ts;
235
236                         clock_gettime(CLOCK_MONOTONIC, &ts);
237                         ts.tv_sec += 1;
238                         rc = pthread_cond_timedwait(&config_cond,
239                                                     &config_lock, &ts);
240                 }
241                 if (!rc) {
242                         running_state = state;
243                         pthread_cond_broadcast(&config_cond);
244 #ifdef USE_SYSTEMD
245                         do_sd_notify(old_state);
246 #endif
247                 }
248         }
249         pthread_cleanup_pop(1);
250         return rc;
251 }
252
253 struct config *get_multipath_config(void)
254 {
255         rcu_read_lock();
256         return rcu_dereference(multipath_conf);
257 }
258
259 void put_multipath_config(void *arg)
260 {
261         rcu_read_unlock();
262 }
263
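/*
 * Check whether the active path group is still the best one, optionally
 * refreshing path priorities first. Returns 1 if a switch to mpp->bestpg
 * is needed, 0 otherwise.
 */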
264 static int
265 need_switch_pathgroup (struct multipath * mpp, int refresh)
266 {
267         struct pathgroup * pgp;
268         struct path * pp;
269         unsigned int i, j;
270         struct config *conf;
271         int bestpg;
272
273         if (!mpp)
274                 return 0;
275
276         /*
277          * Refresh path priority values
278          */
279         if (refresh) {
280                 vector_foreach_slot (mpp->pg, pgp, i) {
281                         vector_foreach_slot (pgp->paths, pp, j) {
282                                 conf = get_multipath_config();
283                                 pthread_cleanup_push(put_multipath_config,
284                                                      conf);
285                                 pathinfo(pp, conf, DI_PRIO);
286                                 pthread_cleanup_pop(1);
287                         }
288                 }
289         }
290
291         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
292                 return 0;
293
294         bestpg = select_path_group(mpp);
295         if (mpp->pgfailback == -FAILBACK_MANUAL)
296                 return 0;
297
298         mpp->bestpg = bestpg;
299         if (mpp->bestpg != mpp->nextpg)
300                 return 1;
301
302         return 0;
303 }
304
305 static void
306 switch_pathgroup (struct multipath * mpp)
307 {
308         mpp->stat_switchgroup++;
309         dm_switchgroup(mpp->alias, mpp->bestpg);
310         condlog(2, "%s: switch to path group #%i",
311                  mpp->alias, mpp->bestpg);
312 }
313
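/*
 * Arrange for map events to be received: either register the map with the
 * dmevents polling code or spawn a dedicated per-map waiter thread,
 * depending on poll_dmevents.
 */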
314 static int
315 wait_for_events(struct multipath *mpp, struct vectors *vecs)
316 {
317         if (poll_dmevents)
318                 return watch_dmevents(mpp->alias);
319         else
320                 return start_waiter_thread(mpp, vecs);
321 }
322
323 static void
324 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
325 {
326         /* devices are automatically removed by the dmevent polling code,
327          * so they don't need to be manually removed here */
328         if (!poll_dmevents)
329                 stop_waiter_thread(mpp, vecs);
330         remove_map(mpp, vecs, PURGE_VEC);
331 }
332
333 static void
334 remove_maps_and_stop_waiters(struct vectors *vecs)
335 {
336         int i;
337         struct multipath * mpp;
338
339         if (!vecs)
340                 return;
341
342         if (!poll_dmevents) {
343                 vector_foreach_slot(vecs->mpvec, mpp, i)
344                         stop_waiter_thread(mpp, vecs);
345         }
346         else
347                 unwatch_all_dmevents();
348
349         remove_maps(vecs);
350 }
351
352 static void
353 set_multipath_wwid (struct multipath * mpp)
354 {
355         if (strlen(mpp->wwid))
356                 return;
357
358         dm_get_uuid(mpp->alias, mpp->wwid);
359 }
360
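/*
 * Sync the kernel's queue_if_no_path setting with the configured
 * no_path_retry policy, entering recovery mode when a retry count is set
 * and no usable paths remain.
 */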
361 static void set_no_path_retry(struct multipath *mpp)
362 {
363         char is_queueing = 0;
364
365         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
366         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
367                 is_queueing = 1;
368
369         switch (mpp->no_path_retry) {
370         case NO_PATH_RETRY_UNDEF:
371                 break;
372         case NO_PATH_RETRY_FAIL:
373                 if (is_queueing)
374                         dm_queue_if_no_path(mpp->alias, 0);
375                 break;
376         case NO_PATH_RETRY_QUEUE:
377                 if (!is_queueing)
378                         dm_queue_if_no_path(mpp->alias, 1);
379                 break;
380         default:
381                 if (mpp->nr_active > 0) {
382                         mpp->retry_tick = 0;
383                         dm_queue_if_no_path(mpp->alias, 1);
384                 } else if (is_queueing && mpp->retry_tick == 0)
385                         enter_recovery_mode(mpp);
386                 break;
387         }
388 }
389
390 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
391                       int reset)
392 {
393         if (dm_get_info(mpp->alias, &mpp->dmi)) {
394                 /* Error accessing table */
395                 condlog(3, "%s: cannot access table", mpp->alias);
396                 goto out;
397         }
398
399         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
400                 condlog(0, "%s: failed to setup multipath", mpp->alias);
401                 goto out;
402         }
403
404         if (reset) {
405                 set_no_path_retry(mpp);
406                 if (VECTOR_SIZE(mpp->paths) != 0)
407                         dm_cancel_deferred_remove(mpp);
408         }
409
410         return 0;
411 out:
412         remove_map_and_stop_waiter(mpp, vecs);
413         return 1;
414 }
415
416 int update_multipath (struct vectors *vecs, char *mapname, int reset)
417 {
418         struct multipath *mpp;
419         struct pathgroup  *pgp;
420         struct path *pp;
421         int i, j;
422
423         mpp = find_mp_by_alias(vecs->mpvec, mapname);
424
425         if (!mpp) {
426                 condlog(3, "%s: multipath map not found", mapname);
427                 return 2;
428         }
429
430         if (__setup_multipath(vecs, mpp, reset))
431                 return 1; /* mpp freed in __setup_multipath */
432
433         /*
434          * compare checkers states with DM states
435          */
436         vector_foreach_slot (mpp->pg, pgp, i) {
437                 vector_foreach_slot (pgp->paths, pp, j) {
438                         if (pp->dmstate != PSTATE_FAILED)
439                                 continue;
440
441                         if (pp->state != PATH_DOWN) {
442                                 struct config *conf;
443                                 int oldstate = pp->state;
444                                 int checkint;
445
446                                 conf = get_multipath_config();
447                                 checkint = conf->checkint;
448                                 put_multipath_config(conf);
449                                 condlog(2, "%s: mark as failed", pp->dev);
450                                 mpp->stat_path_failures++;
451                                 pp->state = PATH_DOWN;
452                                 if (oldstate == PATH_UP ||
453                                     oldstate == PATH_GHOST)
454                                         update_queue_mode_del_path(mpp);
455
456                                 /*
457                                  * if opportune,
458                                  * schedule the next check earlier
459                                  */
460                                 if (pp->tick > checkint)
461                                         pp->tick = checkint;
462                         }
463                 }
464         }
465         return 0;
466 }
467
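/*
 * Re-adopt paths and reload the device-mapper table for mpp, retrying a
 * failed reload up to three times. Returns 1 if the map had to be removed,
 * 0 otherwise.
 */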
468 static int
469 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
470 {
471         int retries = 3;
472         char params[PARAMS_SIZE] = {0};
473
474 retry:
475         condlog(4, "%s: updating new map", mpp->alias);
476         if (adopt_paths(vecs->pathvec, mpp)) {
477                 condlog(0, "%s: failed to adopt paths for new map update",
478                         mpp->alias);
479                 retries = -1;
480                 goto fail;
481         }
482         verify_paths(mpp, vecs);
483         mpp->action = ACT_RELOAD;
484
485         extract_hwe_from_path(mpp);
486         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
487                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
488                 retries = -1;
489                 goto fail;
490         }
491         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
492                 condlog(0, "%s: update_map sleep", mpp->alias);
493                 sleep(1);
494                 goto retry;
495         }
496         dm_lib_release();
497
498 fail:
499         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
500                 condlog(0, "%s: failed to create new map", mpp->alias);
501                 remove_map(mpp, vecs, 1);
502                 return 1;
503         }
504
505         if (setup_multipath(vecs, mpp))
506                 return 1;
507
508         sync_map_state(mpp);
509
510         if (retries < 0)
511                 condlog(0, "%s: failed reload in new map update", mpp->alias);
512         return 0;
513 }
514
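/*
 * Register an existing kernel multipath map, known only by its alias, with
 * multipathd: allocate the struct multipath, read its table and status from
 * the device-mapper, and add it to vecs->mpvec.
 */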
515 static struct multipath *
516 add_map_without_path (struct vectors *vecs, const char *alias)
517 {
518         struct multipath * mpp = alloc_multipath();
519         struct config *conf;
520
521         if (!mpp)
522                 return NULL;
523         if (!alias) {
524                 FREE(mpp);
525                 return NULL;
526         }
527
528         mpp->alias = STRDUP(alias);
529
530         if (dm_get_info(mpp->alias, &mpp->dmi)) {
531                 condlog(3, "%s: cannot access table", mpp->alias);
532                 goto out;
533         }
534         set_multipath_wwid(mpp);
535         conf = get_multipath_config();
536         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
537         put_multipath_config(conf);
538
539         if (update_multipath_table(mpp, vecs->pathvec, 1))
540                 goto out;
541         if (update_multipath_status(mpp))
542                 goto out;
543
544         if (!vector_alloc_slot(vecs->mpvec))
545                 goto out;
546
547         vector_set_slot(vecs->mpvec, mpp);
548
549         if (update_map(mpp, vecs, 1) != 0) /* map removed */
550                 return NULL;
551
552         return mpp;
553 out:
554         remove_map(mpp, vecs, PURGE_VEC);
555         return NULL;
556 }
557
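/*
 * Flush maps from the old map vector (vecs->mpvec) that are not present in
 * the newly discovered vector nmpv; maps that cannot be flushed (e.g. still
 * open) are re-set-up and carried over into nmpv instead.
 */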
558 static int
559 coalesce_maps(struct vectors *vecs, vector nmpv)
560 {
561         struct multipath * ompp;
562         vector ompv = vecs->mpvec;
563         unsigned int i, reassign_maps;
564         struct config *conf;
565
566         conf = get_multipath_config();
567         reassign_maps = conf->reassign_maps;
568         put_multipath_config(conf);
569         vector_foreach_slot (ompv, ompp, i) {
570                 condlog(3, "%s: coalesce map", ompp->alias);
571                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
572                         /*
573                          * remove all current maps not allowed by the
574                          * current configuration
575                          */
576                         if (dm_flush_map(ompp->alias)) {
577                                 condlog(0, "%s: unable to flush devmap",
578                                         ompp->alias);
579                                 /*
580                                  * may be just because the device is open
581                                  */
582                                 if (setup_multipath(vecs, ompp) != 0) {
583                                         i--;
584                                         continue;
585                                 }
586                                 if (!vector_alloc_slot(nmpv))
587                                         return 1;
588
589                                 vector_set_slot(nmpv, ompp);
590
591                                 vector_del_slot(ompv, i);
592                                 i--;
593                         }
594                         else {
595                                 dm_lib_release();
596                                 condlog(2, "%s devmap removed", ompp->alias);
597                         }
598                 } else if (reassign_maps) {
599                         condlog(3, "%s: Reassign existing device-mapper"
600                                 " devices", ompp->alias);
601                         dm_reassign(ompp->alias);
602                 }
603         }
604         return 0;
605 }
606
607 static void
608 sync_maps_state(vector mpvec)
609 {
610         unsigned int i;
611         struct multipath *mpp;
612
613         vector_foreach_slot (mpvec, mpp, i)
614                 sync_map_state(mpp);
615 }
616
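/*
 * Remove the map from the kernel and drop multipathd's bookkeeping for it.
 * With nopaths set, the map is flushed even if it still has paths
 * (honoring deferred_remove). Returns 0 on success, nonzero otherwise.
 */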
617 static int
618 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
619 {
620         int r;
621
622         if (nopaths)
623                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
624         else
625                 r = dm_flush_map(mpp->alias);
626         /*
627          * clear references to this map now, so we can ignore the spurious
628          * uevent that the dm_flush_map call above may have generated
629          */
630         if (r) {
631                 /*
632                  * May not really be an error -- if the map was already flushed
633                  * from the device mapper by dmsetup(8) for instance.
634                  */
635                 if (r == 1)
636                         condlog(0, "%s: can't flush", mpp->alias);
637                 else {
638                         condlog(2, "%s: devmap deferred remove", mpp->alias);
639                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
640                 }
641                 return r;
642         }
643         else {
644                 dm_lib_release();
645                 condlog(2, "%s: map flushed", mpp->alias);
646         }
647
648         orphan_paths(vecs->pathvec, mpp);
649         remove_map_and_stop_waiter(mpp, vecs);
650
651         return 0;
652 }
653
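/*
 * Handle an add/change uevent for a dm device: resolve the map name from
 * DM_NAME or from major:minor, then call ev_add_map() under the vector lock.
 */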
654 static int
655 uev_add_map (struct uevent * uev, struct vectors * vecs)
656 {
657         char *alias;
658         int major = -1, minor = -1, rc;
659
660         condlog(3, "%s: add map (uevent)", uev->kernel);
661         alias = uevent_get_dm_name(uev);
662         if (!alias) {
663                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
664                 major = uevent_get_major(uev);
665                 minor = uevent_get_minor(uev);
666                 alias = dm_mapname(major, minor);
667                 if (!alias) {
668                         condlog(2, "%s: mapname not found for %d:%d",
669                                 uev->kernel, major, minor);
670                         return 1;
671                 }
672         }
673         pthread_cleanup_push(cleanup_lock, &vecs->lock);
674         lock(&vecs->lock);
675         pthread_testcancel();
676         rc = ev_add_map(uev->kernel, alias, vecs);
677         lock_cleanup_pop(vecs->lock);
678         FREE(alias);
679         return rc;
680 }
681
682 /*
683  * ev_add_map expects that the multipath device already exists in kernel
684  * before it is called. It just adds a device to multipathd or updates an
685  * existing device.
686  */
687 int
688 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
689 {
690         struct multipath * mpp;
691         int delayed_reconfig, reassign_maps;
692         struct config *conf;
693
694         if (!dm_is_mpath(alias)) {
695                 condlog(4, "%s: not a multipath map", alias);
696                 return 0;
697         }
698
699         mpp = find_mp_by_alias(vecs->mpvec, alias);
700
701         if (mpp) {
702                 if (mpp->wait_for_udev > 1) {
703                         condlog(2, "%s: performing delayed actions",
704                                 mpp->alias);
705                         if (update_map(mpp, vecs, 0))
706                                 /* setup multipathd removed the map */
707                                 return 1;
708                 }
709                 conf = get_multipath_config();
710                 delayed_reconfig = conf->delayed_reconfig;
711                 reassign_maps = conf->reassign_maps;
712                 put_multipath_config(conf);
713                 if (mpp->wait_for_udev) {
714                         mpp->wait_for_udev = 0;
715                         if (delayed_reconfig &&
716                             !need_to_delay_reconfig(vecs)) {
717                                 condlog(2, "reconfigure (delayed)");
718                                 set_config_state(DAEMON_CONFIGURE);
719                                 return 0;
720                         }
721                 }
722                 /*
723                  * Not really an error -- we generate our own uevent
724                  * if we create a multipath mapped device as a result
725                  * of uev_add_path
726                  */
727                 if (reassign_maps) {
728                         condlog(3, "%s: Reassign existing device-mapper devices",
729                                 alias);
730                         dm_reassign(alias);
731                 }
732                 return 0;
733         }
734         condlog(2, "%s: adding map", alias);
735
736         /*
737          * now we can register the map
738          */
739         if ((mpp = add_map_without_path(vecs, alias))) {
740                 sync_map_state(mpp);
741                 condlog(2, "%s: devmap %s registered", alias, dev);
742                 return 0;
743         } else {
744                 condlog(2, "%s: ev_add_map failed", dev);
745                 return 1;
746         }
747 }
748
749 static int
750 uev_remove_map (struct uevent * uev, struct vectors * vecs)
751 {
752         char *alias;
753         int minor;
754         struct multipath *mpp;
755
756         condlog(3, "%s: remove map (uevent)", uev->kernel);
757         alias = uevent_get_dm_name(uev);
758         if (!alias) {
759                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
760                 return 0;
761         }
762         minor = uevent_get_minor(uev);
763
764         pthread_cleanup_push(cleanup_lock, &vecs->lock);
765         lock(&vecs->lock);
766         pthread_testcancel();
767         mpp = find_mp_by_minor(vecs->mpvec, minor);
768
769         if (!mpp) {
770                 condlog(2, "%s: devmap not registered, can't remove",
771                         uev->kernel);
772                 goto out;
773         }
774         if (strcmp(mpp->alias, alias)) {
775                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
776                         mpp->alias, mpp->dmi->minor, minor);
777                 goto out;
778         }
779
780         orphan_paths(vecs->pathvec, mpp);
781         remove_map_and_stop_waiter(mpp, vecs);
782 out:
783         lock_cleanup_pop(vecs->lock);
784         FREE(alias);
785         return 0;
786 }
787
788 /* Called from CLI handler */
789 int
790 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
791 {
792         struct multipath * mpp;
793
794         mpp = find_mp_by_minor(vecs->mpvec, minor);
795
796         if (!mpp) {
797                 condlog(2, "%s: devmap not registered, can't remove",
798                         devname);
799                 return 1;
800         }
801         if (strcmp(mpp->alias, alias)) {
802                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
803                         mpp->alias, mpp->dmi->minor, minor);
804                 return 1;
805         }
806         return flush_map(mpp, vecs, 0);
807 }
808
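/*
 * Handle an "add" uevent for a path device: either reinitialize a path
 * that is already in pathvec, or allocate a new struct path, store it,
 * and hand it to ev_add_path().
 */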
809 static int
810 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
811 {
812         struct path *pp;
813         int ret = 0, i;
814         struct config *conf;
815
816         condlog(3, "%s: add path (uevent)", uev->kernel);
817         if (strstr(uev->kernel, "..") != NULL) {
818                 /*
819                  * Don't allow relative device names in the pathvec
820                  */
821                 condlog(0, "%s: path name is invalid", uev->kernel);
822                 return 1;
823         }
824
825         pthread_cleanup_push(cleanup_lock, &vecs->lock);
826         lock(&vecs->lock);
827         pthread_testcancel();
828         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
829         if (pp) {
830                 int r;
831
832                 condlog(3, "%s: spurious uevent, path already in pathvec",
833                         uev->kernel);
834                 if (!pp->mpp && !strlen(pp->wwid)) {
835                         condlog(3, "%s: reinitialize path", uev->kernel);
836                         udev_device_unref(pp->udev);
837                         pp->udev = udev_device_ref(uev->udev);
838                         conf = get_multipath_config();
839                         pthread_cleanup_push(put_multipath_config, conf);
840                         r = pathinfo(pp, conf,
841                                      DI_ALL | DI_BLACKLIST);
842                         pthread_cleanup_pop(1);
843                         if (r == PATHINFO_OK)
844                                 ret = ev_add_path(pp, vecs, need_do_map);
845                         else if (r == PATHINFO_SKIPPED) {
846                                 condlog(3, "%s: remove blacklisted path",
847                                         uev->kernel);
848                                 i = find_slot(vecs->pathvec, (void *)pp);
849                                 if (i != -1)
850                                         vector_del_slot(vecs->pathvec, i);
851                                 free_path(pp);
852                         } else {
853                                 condlog(0, "%s: failed to reinitialize path",
854                                         uev->kernel);
855                                 ret = 1;
856                         }
857                 }
858         }
859         lock_cleanup_pop(vecs->lock);
860         if (pp)
861                 return ret;
862
863         /*
864          * get path vital state
865          */
866         conf = get_multipath_config();
867         pthread_cleanup_push(put_multipath_config, conf);
868         ret = alloc_path_with_pathinfo(conf, uev->udev,
869                                        uev->wwid, DI_ALL, &pp);
870         pthread_cleanup_pop(1);
871         if (!pp) {
872                 if (ret == PATHINFO_SKIPPED)
873                         return 0;
874                 condlog(3, "%s: failed to get path info", uev->kernel);
875                 return 1;
876         }
877         pthread_cleanup_push(cleanup_lock, &vecs->lock);
878         lock(&vecs->lock);
879         pthread_testcancel();
880         ret = store_path(vecs->pathvec, pp);
881         if (!ret) {
882                 conf = get_multipath_config();
883                 pp->checkint = conf->checkint;
884                 put_multipath_config(conf);
885                 ret = ev_add_path(pp, vecs, need_do_map);
886         } else {
887                 condlog(0, "%s: failed to store path info, "
888                         "dropping event",
889                         uev->kernel);
890                 free_path(pp);
891                 ret = 1;
892         }
893         lock_cleanup_pop(vecs->lock);
894         return ret;
895 }
896
897 /*
898  * returns:
899  * 0: added
900  * 1: error
901  */
902 int
903 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
904 {
905         struct multipath * mpp;
906         char params[PARAMS_SIZE] = {0};
907         int retries = 3;
908         int start_waiter = 0;
909         int ret;
910
911         /*
912          * need path UID to go any further
913          */
914         if (strlen(pp->wwid) == 0) {
915                 condlog(0, "%s: failed to get path uid", pp->dev);
916                 goto fail; /* leave path added to pathvec */
917         }
918         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
919         if (mpp && mpp->wait_for_udev &&
920             (pathcount(mpp, PATH_UP) > 0 ||
921              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
922               mpp->ghost_delay_tick <= 0))) {
923                 /* if wait_for_udev is set and valid paths exist */
924                 condlog(3, "%s: delaying path addition until %s is fully initialized",
925                         pp->dev, mpp->alias);
926                 mpp->wait_for_udev = 2;
927                 orphan_path(pp, "waiting for create to complete");
928                 return 0;
929         }
930
931         pp->mpp = mpp;
932 rescan:
933         if (mpp) {
934                 if (pp->size && mpp->size != pp->size) {
935                         condlog(0, "%s: failed to add new path %s, "
936                                 "device size mismatch",
937                                 mpp->alias, pp->dev);
938                         int i = find_slot(vecs->pathvec, (void *)pp);
939                         if (i != -1)
940                                 vector_del_slot(vecs->pathvec, i);
941                         free_path(pp);
942                         return 1;
943                 }
944
945                 condlog(4,"%s: adopting all paths for path %s",
946                         mpp->alias, pp->dev);
947                 if (adopt_paths(vecs->pathvec, mpp))
948                         goto fail; /* leave path added to pathvec */
949
950                 verify_paths(mpp, vecs);
951                 mpp->action = ACT_RELOAD;
952                 extract_hwe_from_path(mpp);
953         } else {
954                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
955                         orphan_path(pp, "only one path");
956                         return 0;
957                 }
958                 condlog(4,"%s: creating new map", pp->dev);
959                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
960                         mpp->action = ACT_CREATE;
961                         /*
962                          * We don't depend on ACT_CREATE, as domap will
963                          * set it to ACT_NOTHING when complete.
964                          */
965                         start_waiter = 1;
966                 }
967                 if (!start_waiter)
968                         goto fail; /* leave path added to pathvec */
969         }
970
971         /* persistent reservation check*/
972         mpath_pr_event_handle(pp);
973
974         if (!need_do_map)
975                 return 0;
976
977         if (!dm_map_present(mpp->alias)) {
978                 mpp->action = ACT_CREATE;
979                 start_waiter = 1;
980         }
981         /*
982          * push the map to the device-mapper
983          */
984         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
985                 condlog(0, "%s: failed to setup map for addition of new "
986                         "path %s", mpp->alias, pp->dev);
987                 goto fail_map;
988         }
989         /*
990          * reload the map for the multipath mapped device
991          */
992 retry:
993         ret = domap(mpp, params, 1);
994         if (ret <= 0) {
995                 if (ret < 0 && retries-- > 0) {
996                         condlog(0, "%s: retry domap for addition of new "
997                                 "path %s", mpp->alias, pp->dev);
998                         sleep(1);
999                         goto retry;
1000                 }
1001                 condlog(0, "%s: failed in domap for addition of new "
1002                         "path %s", mpp->alias, pp->dev);
1003                 /*
1004                  * deal with asynchronous uevents :((
1005                  */
1006                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1007                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
1008                         sleep(1);
1009                         update_mpp_paths(mpp, vecs->pathvec);
1010                         goto rescan;
1011                 }
1012                 else if (mpp->action == ACT_RELOAD)
1013                         condlog(0, "%s: giving up reload", mpp->alias);
1014                 else
1015                         goto fail_map;
1016         }
1017         dm_lib_release();
1018
1019         if ((mpp->action == ACT_CREATE ||
1020              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1021             wait_for_events(mpp, vecs))
1022                 goto fail_map;
1023
1024         /*
1025          * update our state from kernel regardless of create or reload
1026          */
1027         if (setup_multipath(vecs, mpp))
1028                 goto fail; /* if setup_multipath fails, it removes the map */
1029
1030         sync_map_state(mpp);
1031
1032         if (retries >= 0) {
1033                 condlog(2, "%s [%s]: path added to devmap %s",
1034                         pp->dev, pp->dev_t, mpp->alias);
1035                 return 0;
1036         } else
1037                 goto fail;
1038
1039 fail_map:
1040         remove_map(mpp, vecs, 1);
1041 fail:
1042         orphan_path(pp, "failed to add path");
1043         return 1;
1044 }
1045
1046 static int
1047 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1048 {
1049         struct path *pp;
1050         int ret = 0;
1051
1052         condlog(3, "%s: remove path (uevent)", uev->kernel);
1053         delete_foreign(uev->udev);
1054
1055         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1056         lock(&vecs->lock);
1057         pthread_testcancel();
1058         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1059         if (pp)
1060                 ret = ev_remove_path(pp, vecs, need_do_map);
1061         lock_cleanup_pop(vecs->lock);
1062         if (!pp) {
1063                 /* Not an error; path might have been purged earlier */
1064                 condlog(0, "%s: path already removed", uev->kernel);
1065                 return 0;
1066         }
1067         return ret;
1068 }
1069
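/*
 * Remove pp from its map (reloading the map, or flushing it if this was the
 * last path) and from pathvec, then free it.
 * Returns 0 on success, 1 on failure.
 */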
1070 int
1071 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1072 {
1073         struct multipath * mpp;
1074         int i, retval = 0;
1075         char params[PARAMS_SIZE] = {0};
1076
1077         /*
1078          * avoid referring to the map of an orphaned path
1079          */
1080         if ((mpp = pp->mpp)) {
1081                 /*
1082                  * transform the mp->pg vector of vectors of paths
1083                  * into a mp->params string to feed the device-mapper
1084                  */
1085                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1086                         condlog(0, "%s: failed to update paths",
1087                                 mpp->alias);
1088                         goto fail;
1089                 }
1090
1091                 /*
1092                  * Make sure mpp->hwe doesn't point to freed memory
1093                  * We call extract_hwe_from_path() below to restore mpp->hwe
1094                  */
1095                 if (mpp->hwe == pp->hwe)
1096                         mpp->hwe = NULL;
1097
1098                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1099                         vector_del_slot(mpp->paths, i);
1100
1101                 /*
1102                  * remove the map IF removing the last path
1103                  */
1104                 if (VECTOR_SIZE(mpp->paths) == 0) {
1105                         char alias[WWID_SIZE];
1106
1107                         /*
1108                          * flush_map will fail if the device is open
1109                          */
1110                         strlcpy(alias, mpp->alias, WWID_SIZE);
1111                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1112                                 condlog(2, "%s: last path deleted, disabling queueing", mpp->alias);
1113                                 mpp->retry_tick = 0;
1114                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1115                                 mpp->disable_queueing = 1;
1116                                 mpp->stat_map_failures++;
1117                                 dm_queue_if_no_path(mpp->alias, 0);
1118                         }
1119                         if (!flush_map(mpp, vecs, 1)) {
1120                                 condlog(2, "%s: removed map after"
1121                                         " removing all paths",
1122                                         alias);
1123                                 retval = 0;
1124                                 goto out;
1125                         }
1126                         /*
1127                          * Not an error, continue
1128                          */
1129                 }
1130
1131                 if (mpp->hwe == NULL)
1132                         extract_hwe_from_path(mpp);
1133
1134                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1135                         condlog(0, "%s: failed to setup map for"
1136                                 " removal of path %s", mpp->alias, pp->dev);
1137                         goto fail;
1138                 }
1139
1140                 if (mpp->wait_for_udev) {
1141                         mpp->wait_for_udev = 2;
1142                         goto out;
1143                 }
1144
1145                 if (!need_do_map)
1146                         goto out;
1147                 /*
1148                  * reload the map
1149                  */
1150                 mpp->action = ACT_RELOAD;
1151                 if (domap(mpp, params, 1) <= 0) {
1152                         condlog(0, "%s: failed in domap for "
1153                                 "removal of path %s",
1154                                 mpp->alias, pp->dev);
1155                         retval = 1;
1156                 } else {
1157                         /*
1158                          * update our state from kernel
1159                          */
1160                         if (setup_multipath(vecs, mpp))
1161                                 return 1;
1162                         sync_map_state(mpp);
1163
1164                         condlog(2, "%s [%s]: path removed from map %s",
1165                                 pp->dev, pp->dev_t, mpp->alias);
1166                 }
1167         }
1168
1169 out:
1170         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1171                 vector_del_slot(vecs->pathvec, i);
1172
1173         free_path(pp);
1174
1175         return retval;
1176
1177 fail:
1178         remove_map_and_stop_waiter(mpp, vecs);
1179         return 1;
1180 }
1181
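/*
 * Handle a "change" uevent for a path device: detect WWID changes
 * (optionally failing the path when disable_changed_wwids is set), refresh
 * udev/sysfs info, and reload the map if the read-only state changed.
 */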
1182 static int
1183 uev_update_path (struct uevent *uev, struct vectors * vecs)
1184 {
1185         int ro, retval = 0, rc;
1186         struct path * pp;
1187         struct config *conf;
1188         int disable_changed_wwids;
1189         int needs_reinit = 0;
1190
1191         switch ((rc = change_foreign(uev->udev))) {
1192         case FOREIGN_OK:
1193                 /* known foreign path, ignore event */
1194                 return 0;
1195         case FOREIGN_IGNORED:
1196                 break;
1197         case FOREIGN_ERR:
1198                 condlog(3, "%s: error in change_foreign", __func__);
1199                 break;
1200         default:
1201                 condlog(1, "%s: unsupported return code %d from change_foreign",
1202                         __func__, rc);
1203                 break;
1204         }
1205
1206         conf = get_multipath_config();
1207         disable_changed_wwids = conf->disable_changed_wwids;
1208         put_multipath_config(conf);
1209
1210         ro = uevent_get_disk_ro(uev);
1211
1212         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1213         lock(&vecs->lock);
1214         pthread_testcancel();
1215
1216         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1217         if (pp) {
1218                 struct multipath *mpp = pp->mpp;
1219                 char wwid[WWID_SIZE];
1220
1221                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1222                         needs_reinit = 1;
1223                         goto out;
1224                 }
1225                 /* Don't deal with other types of failed initialization
1226                  * now. check_path will handle it */
1227                 if (!strlen(pp->wwid))
1228                         goto out;
1229
1230                 strcpy(wwid, pp->wwid);
1231                 get_uid(pp, pp->state, uev->udev);
1232
1233                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1234                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1235                                 uev->kernel, wwid, pp->wwid,
1236                                 (disable_changed_wwids ? "disallowing" :
1237                                  "continuing"));
1238                         strcpy(pp->wwid, wwid);
1239                         if (disable_changed_wwids) {
1240                                 if (!pp->wwid_changed) {
1241                                         pp->wwid_changed = 1;
1242                                         pp->tick = 1;
1243                                         if (pp->mpp)
1244                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1245                                 }
1246                                 goto out;
1247                         }
1248                 } else {
1249                         pp->wwid_changed = 0;
1250                         udev_device_unref(pp->udev);
1251                         pp->udev = udev_device_ref(uev->udev);
1252                         conf = get_multipath_config();
1253                         pthread_cleanup_push(put_multipath_config, conf);
1254                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1255                                 condlog(1, "%s: pathinfo failed after change uevent",
1256                                         uev->kernel);
1257                         pthread_cleanup_pop(1);
1258                 }
1259
1260                 if (mpp && ro >= 0) {
1261                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1262
1263                         if (mpp->wait_for_udev)
1264                                 mpp->wait_for_udev = 2;
1265                         else {
1266                                 if (ro == 1)
1267                                         pp->mpp->force_readonly = 1;
1268                                 retval = reload_map(vecs, mpp, 0, 1);
1269                                 pp->mpp->force_readonly = 0;
1270                                 condlog(2, "%s: map %s reloaded (retval %d)",
1271                                         uev->kernel, mpp->alias, retval);
1272                         }
1273                 }
1274         }
1275 out:
1276         lock_cleanup_pop(vecs->lock);
1277         if (!pp) {
1278                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1279                 if (uev->udev) {
1280                         int flag = DI_SYSFS | DI_WWID;
1281
1282                         conf = get_multipath_config();
1283                         pthread_cleanup_push(put_multipath_config, conf);
1284                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1285                         pthread_cleanup_pop(1);
1286
1287                         if (retval == PATHINFO_SKIPPED) {
1288                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1289                                 return 0;
1290                         }
1291                 }
1292
1293                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1294         }
1295         if (needs_reinit)
1296                 retval = uev_add_path(uev, vecs, 1);
1297         return retval;
1298 }
1299
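/*
 * Parse DM_ACTION/DM_PATH from a dm change uevent; on PATH_FAILED, forward
 * the failed path to the io_err_stat code for path IO error accounting.
 * Returns 0 if the event was handled, 1 otherwise.
 */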
1300 static int
1301 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1302 {
1303         char *action = NULL, *devt = NULL;
1304         struct path *pp;
1305         int r = 1;
1306
1307         action = uevent_get_dm_action(uev);
1308         if (!action)
1309                 return 1;
1310         if (strncmp(action, "PATH_FAILED", 11))
1311                 goto out;
1312         devt = uevent_get_dm_path(uev);
1313         if (!devt) {
1314                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1315                 goto out;
1316         }
1317
1318         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1319         lock(&vecs->lock);
1320         pthread_testcancel();
1321         pp = find_path_by_devt(vecs->pathvec, devt);
1322         if (!pp)
1323                 goto out_lock;
1324         r = io_err_stat_handle_pathfail(pp);
1325         if (r)
1326                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1327                                 pp->dev);
1328 out_lock:
1329         lock_cleanup_pop(vecs->lock);
1330         FREE(devt);
1331         FREE(action);
1332         return r;
1333 out:
1334         FREE(action);
1335         return 1;
1336 }
1337
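/*
 * Populate vecs->mpvec with all existing multipath maps from the
 * device-mapper, dropping any map whose table or status cannot be read.
 */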
1338 static int
1339 map_discovery (struct vectors * vecs)
1340 {
1341         struct multipath * mpp;
1342         unsigned int i;
1343
1344         if (dm_get_maps(vecs->mpvec))
1345                 return 1;
1346
1347         vector_foreach_slot (vecs->mpvec, mpp, i)
1348                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1349                     update_multipath_status(mpp)) {
1350                         remove_map(mpp, vecs, 1);
1351                         i--;
1352                 }
1353
1354         return 0;
1355 }
1356
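/*
 * Callback invoked by the unix socket listener for each client command.
 * Non-root clients may only issue "list" and "show" commands; everything
 * else is passed to parse_cmd(), and *reply/*len carry the response.
 */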
1357 int
1358 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1359                 void * trigger_data)
1360 {
1361         struct vectors * vecs;
1362         int r;
1363
1364         *reply = NULL;
1365         *len = 0;
1366         vecs = (struct vectors *)trigger_data;
1367
1368         if ((str != NULL) && (is_root == false) &&
1369             (strncmp(str, "list", strlen("list")) != 0) &&
1370             (strncmp(str, "show", strlen("show")) != 0)) {
1371                 *reply = STRDUP("permission denied: need to be root");
1372                 if (*reply)
1373                         *len = strlen(*reply) + 1;
1374                 return 1;
1375         }
1376
1377         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1378
1379         if (r > 0) {
1380                 if (r == ETIMEDOUT)
1381                         *reply = STRDUP("timeout\n");
1382                 else
1383                         *reply = STRDUP("fail\n");
1384                 if (*reply)
1385                         *len = strlen(*reply) + 1;
1386                 r = 1;
1387         }
1388         else if (!r && *len == 0) {
1389                 *reply = STRDUP("ok\n");
1390                 if (*reply)
1391                         *len = strlen(*reply) + 1;
1392                 r = 0;
1393         }
1394         /* else if (r < 0) leave *reply alone */
1395
1396         return r;
1397 }
1398
1399 int
1400 uev_trigger (struct uevent * uev, void * trigger_data)
1401 {
1402         int r = 0;
1403         struct vectors * vecs;
1404         struct uevent *merge_uev, *tmp;
1405
1406         vecs = (struct vectors *)trigger_data;
1407
1408         pthread_cleanup_push(config_cleanup, NULL);
1409         pthread_mutex_lock(&config_lock);
1410         if (running_state != DAEMON_IDLE &&
1411             running_state != DAEMON_RUNNING)
1412                 pthread_cond_wait(&config_cond, &config_lock);
1413         pthread_cleanup_pop(1);
1414
1415         if (running_state == DAEMON_SHUTDOWN)
1416                 return 0;
1417
1418         /*
1419          * device map event
1420          * Add events are ignored here as the tables
1421          * are not fully initialised then.
1422          */
1423         if (!strncmp(uev->kernel, "dm-", 3)) {
1424                 if (!uevent_is_mpath(uev)) {
1425                         if (!strncmp(uev->action, "change", 6))
1426                                 (void)add_foreign(uev->udev);
1427                         else if (!strncmp(uev->action, "remove", 6))
1428                                 (void)delete_foreign(uev->udev);
1429                         goto out;
1430                 }
1431                 if (!strncmp(uev->action, "change", 6)) {
1432                         r = uev_add_map(uev, vecs);
1433
1434                         /*
1435                          * the kernel-side dm-mpath issues a PATH_FAILED event
1436                          * when it encounters a path IO error. That event is a
1437                          * natural entry point for the path IO error accounting
1438                          * process.
1439                          */
1440                         uev_pathfail_check(uev, vecs);
1441                 } else if (!strncmp(uev->action, "remove", 6)) {
1442                         r = uev_remove_map(uev, vecs);
1443                 }
1444                 goto out;
1445         }
1446
1447         /*
1448          * path add/remove/change event, add/remove maybe merged
1449          */
1450         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1451                 if (!strncmp(merge_uev->action, "add", 3))
1452                         r += uev_add_path(merge_uev, vecs, 0);
1453                 if (!strncmp(merge_uev->action, "remove", 6))
1454                         r += uev_remove_path(merge_uev, vecs, 0);
1455         }
1456
1457         if (!strncmp(uev->action, "add", 3))
1458                 r += uev_add_path(uev, vecs, 1);
1459         if (!strncmp(uev->action, "remove", 6))
1460                 r += uev_remove_path(uev, vecs, 1);
1461         if (!strncmp(uev->action, "change", 6))
1462                 r += uev_update_path(uev, vecs);
1463
1464 out:
1465         return r;
1466 }
1467
1468 static void rcu_unregister(void *param)
1469 {
1470         rcu_unregister_thread();
1471 }
1472
1473 static void *
1474 ueventloop (void * ap)
1475 {
1476         struct udev *udev = ap;
1477
1478         pthread_cleanup_push(rcu_unregister, NULL);
1479         rcu_register_thread();
1480         if (uevent_listen(udev))
1481                 condlog(0, "error starting uevent listener");
1482         pthread_cleanup_pop(1);
1483         return NULL;
1484 }
1485
1486 static void *
1487 uevqloop (void * ap)
1488 {
1489         pthread_cleanup_push(rcu_unregister, NULL);
1490         rcu_register_thread();
1491         if (uevent_dispatch(&uev_trigger, ap))
1492                 condlog(0, "error starting uevent dispatcher");
1493         pthread_cleanup_pop(1);
1494         return NULL;
1495 }
1496 static void *
1497 uxlsnrloop (void * ap)
1498 {
1499         if (cli_init()) {
1500                 condlog(1, "Failed to init uxsock listener");
1501                 return NULL;
1502         }
1503         pthread_cleanup_push(rcu_unregister, NULL);
1504         rcu_register_thread();
1505         set_handler_callback(LIST+PATHS, cli_list_paths);
1506         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1507         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1508         set_handler_callback(LIST+PATH, cli_list_path);
1509         set_handler_callback(LIST+MAPS, cli_list_maps);
1510         set_handler_callback(LIST+STATUS, cli_list_status);
1511         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1512         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1513         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1514         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1515         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1516         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1517         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1518         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1519         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1520         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1521         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1522         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1523         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1524         set_handler_callback(LIST+CONFIG, cli_list_config);
1525         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1526         set_handler_callback(LIST+DEVICES, cli_list_devices);
1527         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1528         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1529         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1530         set_handler_callback(ADD+PATH, cli_add_path);
1531         set_handler_callback(DEL+PATH, cli_del_path);
1532         set_handler_callback(ADD+MAP, cli_add_map);
1533         set_handler_callback(DEL+MAP, cli_del_map);
1534         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1535         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1536         set_handler_callback(SUSPEND+MAP, cli_suspend);
1537         set_handler_callback(RESUME+MAP, cli_resume);
1538         set_handler_callback(RESIZE+MAP, cli_resize);
1539         set_handler_callback(RELOAD+MAP, cli_reload);
1540         set_handler_callback(RESET+MAP, cli_reassign);
1541         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1542         set_handler_callback(FAIL+PATH, cli_fail);
1543         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1544         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1545         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1546         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1547         set_unlocked_handler_callback(QUIT, cli_quit);
1548         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1549         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1550         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1551         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1552         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1553         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1554         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1555         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1556         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1557
1558         umask(077);
1559         uxsock_listen(&uxsock_trigger, ap);
1560         pthread_cleanup_pop(1);
1561         return NULL;
1562 }
1563
1564 void
1565 exit_daemon (void)
1566 {
1567         post_config_state(DAEMON_SHUTDOWN);
1568 }
1569
1570 static void
1571 fail_path (struct path * pp, int del_active)
1572 {
1573         if (!pp->mpp)
1574                 return;
1575
1576         condlog(2, "checker failed path %s in map %s",
1577                  pp->dev_t, pp->mpp->alias);
1578
1579         dm_fail_path(pp->mpp->alias, pp->dev_t);
1580         if (del_active)
1581                 update_queue_mode_del_path(pp->mpp);
1582 }
1583
1584 /*
1585  * the caller must have locked the path list before calling this function
1586  */
1587 static int
1588 reinstate_path (struct path * pp, int add_active)
1589 {
1590         int ret = 0;
1591
1592         if (!pp->mpp)
1593                 return 0;
1594
1595         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1596                 condlog(0, "%s: reinstate failed", pp->dev_t);
1597                 ret = 1;
1598         } else {
1599                 condlog(2, "%s: reinstated", pp->dev_t);
1600                 if (add_active)
1601                         update_queue_mode_add_path(pp->mpp);
1602         }
1603         return ret;
1604 }
1605
1606 static void
1607 enable_group(struct path * pp)
1608 {
1609         struct pathgroup * pgp;
1610
1611         /*
1612          * if the path was added through uev_add_path, pgindex can be unset.
1613          * the next update_strings() call will set it, upon the map reload event.
1614          *
1615          * we can safely return here, because upon map reload all
1616          * PGs will be enabled.
1617          */
1618         if (!pp->mpp->pg || !pp->pgindex)
1619                 return;
1620
1621         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1622
1623         if (pgp->status == PGSTATE_DISABLED) {
1624                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1625                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1626         }
1627 }
1628
1629 static void
1630 mpvec_garbage_collector (struct vectors * vecs)
1631 {
1632         struct multipath * mpp;
1633         unsigned int i;
1634
1635         if (!vecs->mpvec)
1636                 return;
1637
1638         vector_foreach_slot (vecs->mpvec, mpp, i) {
1639                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1640                         condlog(2, "%s: remove dead map", mpp->alias);
1641                         remove_map_and_stop_waiter(mpp, vecs);
1642                         i--;
1643                 }
1644         }
1645 }
1646
1647 /* This is called after a path has started working again. If the multipath
1648  * device for this path uses the followover failback type, and this is the
1649  * best pathgroup, and this is the first path in the pathgroup to come back
1650  * up, then switch to this pathgroup */
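/*
 * Example: the best PG lost all its paths and the map failed over to
 * another PG.  When the first path of the best PG comes back up (all
 * other paths in that PG still PATH_DOWN or PATH_SHAKY), we fail back
 * to it; if another path in that PG is already usable, no automatic
 * failback is triggered.
 */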
1651 static int
1652 followover_should_failback(struct path * pp)
1653 {
1654         struct pathgroup * pgp;
1655         struct path *pp1;
1656         int i;
1657
1658         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1659             !pp->mpp->pg || !pp->pgindex ||
1660             pp->pgindex != pp->mpp->bestpg)
1661                 return 0;
1662
1663         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1664         vector_foreach_slot(pgp->paths, pp1, i) {
1665                 if (pp1 == pp)
1666                         continue;
1667                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1668                         return 0;
1669         }
1670         return 1;
1671 }
1672
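/*
 * Maps with wait_for_udev set hold off reloads until their creation
 * uevent arrives.  Called from the checker loop, this counts down
 * uev_wait_tick and, on timeout, re-enables reloads (updating the map
 * if needed) and kicks off a delayed reconfigure if one is pending.
 */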
1673 static void
1674 missing_uev_wait_tick(struct vectors *vecs)
1675 {
1676         struct multipath * mpp;
1677         unsigned int i;
1678         int timed_out = 0, delayed_reconfig;
1679         struct config *conf;
1680
1681         vector_foreach_slot (vecs->mpvec, mpp, i) {
1682                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1683                         timed_out = 1;
1684                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1685                         if (mpp->wait_for_udev > 1 &&
1686                             update_map(mpp, vecs, 0)) {
1687                                 /* update_map removed map */
1688                                 i--;
1689                                 continue;
1690                         }
1691                         mpp->wait_for_udev = 0;
1692                 }
1693         }
1694
1695         conf = get_multipath_config();
1696         delayed_reconfig = conf->delayed_reconfig;
1697         put_multipath_config(conf);
1698         if (timed_out && delayed_reconfig &&
1699             !need_to_delay_reconfig(vecs)) {
1700                 condlog(2, "reconfigure (delayed)");
1701                 set_config_state(DAEMON_CONFIGURE);
1702         }
1703 }
1704
1705 static void
1706 ghost_delay_tick(struct vectors *vecs)
1707 {
1708         struct multipath * mpp;
1709         unsigned int i;
1710
1711         vector_foreach_slot (vecs->mpvec, mpp, i) {
1712                 if (mpp->ghost_delay_tick <= 0)
1713                         continue;
1714                 if (--mpp->ghost_delay_tick <= 0) {
1715                         condlog(0, "%s: timed out waiting for active path",
1716                                 mpp->alias);
1717                         mpp->force_udev_reload = 1;
1718                         if (update_map(mpp, vecs, 0) != 0) {
1719                                 /* update_map removed map */
1720                                 i--;
1721                                 continue;
1722                         }
1723                 }
1724         }
1725 }
1726
1727 static void
1728 defered_failback_tick (vector mpvec)
1729 {
1730         struct multipath * mpp;
1731         unsigned int i;
1732
1733         vector_foreach_slot (mpvec, mpp, i) {
1734                 /*
1735                  * count down the deferred failback timer; fail back when it expires
1736                  */
1737                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1738                         mpp->failback_tick--;
1739
1740                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1741                                 switch_pathgroup(mpp);
1742                 }
1743         }
1744 }
1745
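/*
 * Called from the checker loop.  For maps whose retry_tick is armed
 * (queueing with no active path), account the queueing time and turn
 * off queue_if_no_path once the timer expires, so queued I/O can fail.
 */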
1746 static void
1747 retry_count_tick(vector mpvec)
1748 {
1749         struct multipath *mpp;
1750         unsigned int i;
1751
1752         vector_foreach_slot (mpvec, mpp, i) {
1753                 if (mpp->retry_tick > 0) {
1754                         mpp->stat_total_queueing_time++;
1755                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1756                         if (--mpp->retry_tick == 0) {
1757                                 mpp->stat_map_failures++;
1758                                 dm_queue_if_no_path(mpp->alias, 0);
1759                                 condlog(2, "%s: Disable queueing", mpp->alias);
1760                         }
1761                 }
1762         }
1763 }
1764
1765 int update_prio(struct path *pp, int refresh_all)
1766 {
1767         int oldpriority;
1768         struct path *pp1;
1769         struct pathgroup * pgp;
1770         int i, j, changed = 0;
1771         struct config *conf;
1772
1773         if (refresh_all) {
1774                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1775                         vector_foreach_slot (pgp->paths, pp1, j) {
1776                                 oldpriority = pp1->priority;
1777                                 conf = get_multipath_config();
1778                                 pthread_cleanup_push(put_multipath_config,
1779                                                      conf);
1780                                 pathinfo(pp1, conf, DI_PRIO);
1781                                 pthread_cleanup_pop(1);
1782                                 if (pp1->priority != oldpriority)
1783                                         changed = 1;
1784                         }
1785                 }
1786                 return changed;
1787         }
1788         oldpriority = pp->priority;
1789         conf = get_multipath_config();
1790         pthread_cleanup_push(put_multipath_config, conf);
1791         if (pp->state != PATH_DOWN)
1792                 pathinfo(pp, conf, DI_PRIO);
1793         pthread_cleanup_pop(1);
1794
1795         if (pp->priority == oldpriority)
1796                 return 0;
1797         return 1;
1798 }
1799
1800 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1801 {
1802         if (reload_map(vecs, mpp, refresh, 1))
1803                 return 1;
1804
1805         dm_lib_release();
1806         if (setup_multipath(vecs, mpp) != 0)
1807                 return 1;
1808         sync_map_state(mpp);
1809
1810         return 0;
1811 }
1812
1813 /*
1814  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1815  * and '0' otherwise
1816  */
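/*
 * The caller (checkerloop) deletes the path from pathvec and frees it
 * when -1 is returned, and adds non-negative return values to the
 * number of paths checked in this tick.
 */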
1817 int
1818 check_path (struct vectors * vecs, struct path * pp, int ticks)
1819 {
1820         int newstate;
1821         int new_path_up = 0;
1822         int chkr_new_path_up = 0;
1823         int add_active;
1824         int disable_reinstate = 0;
1825         int oldchkrstate = pp->chkrstate;
1826         int retrigger_tries, checkint, max_checkint, verbosity;
1827         struct config *conf;
1828         int ret;
1829
1830         if ((pp->initialized == INIT_OK ||
1831              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1832                 return 0;
1833
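        /*
         * Age the per-path timer by the seconds elapsed since the last
         * loop (clamped at zero); only check the path once it reaches 0.
         */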
1834         if (pp->tick)
1835                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1836         if (pp->tick)
1837                 return 0; /* don't check this path yet */
1838
1839         conf = get_multipath_config();
1840         retrigger_tries = conf->retrigger_tries;
1841         checkint = conf->checkint;
1842         max_checkint = conf->max_checkint;
1843         verbosity = conf->verbosity;
1844         put_multipath_config(conf);
1845         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1846                 if (pp->retriggers < retrigger_tries) {
1847                         condlog(2, "%s: triggering change event to reinitialize",
1848                                 pp->dev);
1849                         pp->initialized = INIT_REQUESTED_UDEV;
1850                         pp->retriggers++;
1851                         sysfs_attr_set_value(pp->udev, "uevent", "change",
1852                                              strlen("change"));
1853                         return 0;
1854                 } else {
1855                         condlog(1, "%s: not initialized after %d udev retriggers",
1856                                 pp->dev, retrigger_tries);
1857                         /*
1858                          * Make sure that the "add missing path" code path
1859                          * below may reinstate the path later, if it ever
1860                          * comes up again.
1861                          * The WWID need not be cleared; if it was set, the
1862                          * state hadn't been INIT_MISSING_UDEV in the first
1863                          * place.
1864                          */
1865                         pp->initialized = INIT_FAILED;
1866                         return 0;
1867                 }
1868         }
1869
1870         /*
1871          * schedule the next check as soon as possible,
1872          * in case we exit abnormally from here
1873          */
1874         pp->tick = checkint;
1875
1876         newstate = path_offline(pp);
1877         /*
1878          * Wait for uevent for removed paths;
1879          * some LLDDs like zfcp keep paths unavailable
1880          * without sending uevents.
1881          */
1882         if (newstate == PATH_REMOVED)
1883                 newstate = PATH_DOWN;
1884
1885         if (newstate == PATH_UP) {
1886                 conf = get_multipath_config();
1887                 pthread_cleanup_push(put_multipath_config, conf);
1888                 newstate = get_state(pp, conf, 1, newstate);
1889                 pthread_cleanup_pop(1);
1890         } else
1891                 checker_clear_message(&pp->checker);
1892
1893         if (pp->wwid_changed) {
1894                 condlog(2, "%s: path wwid has changed. Refusing to use",
1895                         pp->dev);
1896                 newstate = PATH_DOWN;
1897         }
1898
1899         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1900                 condlog(2, "%s: unusable path - checker failed", pp->dev);
1901                 LOG_MSG(2, verbosity, pp);
1902                 conf = get_multipath_config();
1903                 pthread_cleanup_push(put_multipath_config, conf);
1904                 pathinfo(pp, conf, 0);
1905                 pthread_cleanup_pop(1);
1906                 return 1;
1907         }
1908         if (!pp->mpp) {
1909                 if (!strlen(pp->wwid) && pp->initialized == INIT_FAILED &&
1910                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1911                         condlog(2, "%s: add missing path", pp->dev);
1912                         conf = get_multipath_config();
1913                         pthread_cleanup_push(put_multipath_config, conf);
1914                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1915                         pthread_cleanup_pop(1);
1916                         /* INIT_OK implies ret == PATHINFO_OK */
1917                         if (pp->initialized == INIT_OK) {
1918                                 ev_add_path(pp, vecs, 1);
1919                                 pp->tick = 1;
1920                         } else {
1921                                 /*
1922                                  * We failed multiple times to initialize this
1923                                  * path properly. Don't re-check too often.
1924                                  */
1925                                 pp->checkint = max_checkint;
1926                                 if (ret == PATHINFO_SKIPPED)
1927                                         return -1;
1928                         }
1929                 }
1930                 return 0;
1931         }
1932         /*
1933          * Async IO in flight. Keep the previous path state
1934          * and reschedule as soon as possible
1935          */
1936         if (newstate == PATH_PENDING) {
1937                 pp->tick = 1;
1938                 return 0;
1939         }
1940         /*
1941          * Synchronize with kernel state
1942          */
1943         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1944                 condlog(1, "%s: Could not synchronize with kernel state",
1945                         pp->dev);
1946                 pp->dmstate = PSTATE_UNDEF;
1947         }
1948         /* if update_multipath_strings orphaned the path, quit early */
1949         if (!pp->mpp)
1950                 return 0;
1951
1952         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1953                 pp->state = PATH_SHAKY;
1954                 /*
1955                  * reschedule as soon as possible, so that this path can
1956                  * be recovered in time
1957                  */
1958                 pp->tick = 1;
1959                 return 1;
1960         }
1961
1962         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1963              pp->wait_checks > 0) {
1964                 if (pp->mpp->nr_active > 0) {
1965                         pp->state = PATH_DELAYED;
1966                         pp->wait_checks--;
1967                         return 1;
1968                 } else
1969                         pp->wait_checks = 0;
1970         }
1971
1972         /*
1973          * don't reinstate a failed path if it's in stand-by
1974          * and the target supports only implicit tpgs mode.
1975          * this prevents unnecessary i/o by dm on stand-by
1976          * paths if there are no other active paths in the map.
1977          */
1978         disable_reinstate = (newstate == PATH_GHOST &&
1979                             pp->mpp->nr_active == 0 &&
1980                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1981
1982         pp->chkrstate = newstate;
1983         if (newstate != pp->state) {
1984                 int oldstate = pp->state;
1985                 pp->state = newstate;
1986
1987                 LOG_MSG(1, verbosity, pp);
1988
1989                 /*
1990                  * upon state change, reset the checkint
1991                  * to the shortest delay
1992                  */
1993                 conf = get_multipath_config();
1994                 pp->checkint = conf->checkint;
1995                 put_multipath_config(conf);
1996
1997                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1998                         /*
1999                          * proactively fail path in the DM
2000                          */
2001                         if (oldstate == PATH_UP ||
2002                             oldstate == PATH_GHOST) {
2003                                 fail_path(pp, 1);
2004                                 if (pp->mpp->delay_wait_checks > 0 &&
2005                                     pp->watch_checks > 0) {
2006                                         pp->wait_checks = pp->mpp->delay_wait_checks;
2007                                         pp->watch_checks = 0;
2008                                 }
2009                         } else
2010                                 fail_path(pp, 0);
2011
2012                         /*
2013                          * cancel scheduled failback
2014                          */
2015                         pp->mpp->failback_tick = 0;
2016
2017                         pp->mpp->stat_path_failures++;
2018                         return 1;
2019                 }
2020
2021                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2022                         if (pp->mpp->prflag) {
2023                                 /*
2024                                  * Check Persistent Reservation.
2025                                  */
2026                                 condlog(2, "%s: checking persistent "
2027                                         "reservation registration", pp->dev);
2028                                 mpath_pr_event_handle(pp);
2029                         }
2030                 }
2031
2032                 /*
2033                  * reinstate this path
2034                  */
2035                 if (oldstate != PATH_UP &&
2036                     oldstate != PATH_GHOST) {
2037                         if (pp->mpp->delay_watch_checks > 0)
2038                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2039                         add_active = 1;
2040                 } else {
2041                         if (pp->watch_checks > 0)
2042                                 pp->watch_checks--;
2043                         add_active = 0;
2044                 }
2045                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2046                         condlog(3, "%s: reload map", pp->dev);
2047                         ev_add_path(pp, vecs, 1);
2048                         pp->tick = 1;
2049                         return 0;
2050                 }
2051                 new_path_up = 1;
2052
2053                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2054                         chkr_new_path_up = 1;
2055
2056                 /*
2057                  * if at least one path is up in a group, and
2058                  * the group is disabled, re-enable it
2059                  */
2060                 if (newstate == PATH_UP)
2061                         enable_group(pp);
2062         }
2063         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2064                 if ((pp->dmstate == PSTATE_FAILED ||
2065                     pp->dmstate == PSTATE_UNDEF) &&
2066                     !disable_reinstate) {
2067                         /* Clear IO errors */
2068                         if (reinstate_path(pp, 0)) {
2069                                 condlog(3, "%s: reload map", pp->dev);
2070                                 ev_add_path(pp, vecs, 1);
2071                                 pp->tick = 1;
2072                                 return 0;
2073                         }
2074                 } else {
2075                         LOG_MSG(4, verbosity, pp);
2076                         if (pp->checkint != max_checkint) {
2077                                 /*
2078                                  * double the next check delay.
2079                                  * max at conf->max_checkint
2080                                  */
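                                /*
                                 * e.g. with checkint 5s and max_checkint
                                 * 20s, successive good checks delay the
                                 * next check 5 -> 10 -> 20 seconds.
                                 */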
2081                                 if (pp->checkint < (max_checkint / 2))
2082                                         pp->checkint = 2 * pp->checkint;
2083                                 else
2084                                         pp->checkint = max_checkint;
2085
2086                                 condlog(4, "%s: delay next check %is",
2087                                         pp->dev_t, pp->checkint);
2088                         }
2089                         if (pp->watch_checks > 0)
2090                                 pp->watch_checks--;
2091                         pp->tick = pp->checkint;
2092                 }
2093         }
2094         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2095                 if (pp->dmstate == PSTATE_ACTIVE ||
2096                     pp->dmstate == PSTATE_UNDEF)
2097                         fail_path(pp, 0);
2098                 if (newstate == PATH_DOWN) {
2099                         int log_checker_err;
2100
2101                         conf = get_multipath_config();
2102                         log_checker_err = conf->log_checker_err;
2103                         put_multipath_config(conf);
2104                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2105                                 LOG_MSG(3, verbosity, pp);
2106                         else
2107                                 LOG_MSG(2, verbosity, pp);
2108                 }
2109         }
2110
2111         pp->state = newstate;
2112
2113         if (pp->mpp->wait_for_udev)
2114                 return 1;
2115         /*
2116          * path prio refreshing
2117          */
2118         condlog(4, "path prio refresh");
2119
2120         if (update_prio(pp, new_path_up) &&
2121             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2122              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2123                 update_path_groups(pp->mpp, vecs, !new_path_up);
2124         else if (need_switch_pathgroup(pp->mpp, 0)) {
2125                 if (pp->mpp->pgfailback > 0 &&
2126                     (new_path_up || pp->mpp->failback_tick <= 0))
2127                         pp->mpp->failback_tick =
2128                                 pp->mpp->pgfailback + 1;
2129                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2130                          (chkr_new_path_up && followover_should_failback(pp)))
2131                         switch_pathgroup(pp->mpp);
2132         }
2133         return 1;
2134 }
2135
2136 static void init_path_check_interval(struct vectors *vecs)
2137 {
2138         struct config *conf;
2139         struct path *pp;
2140         unsigned int i;
2141
2142         vector_foreach_slot (vecs->pathvec, pp, i) {
2143                 conf = get_multipath_config();
2144                 pp->checkint = conf->checkint;
2145                 put_multipath_config(conf);
2146         }
2147 }
2148
2149 static void *
2150 checkerloop (void *ap)
2151 {
2152         struct vectors *vecs;
2153         struct path *pp;
2154         int count = 0;
2155         unsigned int i;
2156         struct timespec last_time;
2157         struct config *conf;
2158
2159         pthread_cleanup_push(rcu_unregister, NULL);
2160         rcu_register_thread();
2161         mlockall(MCL_CURRENT | MCL_FUTURE);
2162         vecs = (struct vectors *)ap;
2163         condlog(2, "path checkers start up");
2164
2165         /* Tweak start time for initial path check */
2166         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2167                 last_time.tv_sec = 0;
2168         else
2169                 last_time.tv_sec -= 1;
2170
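        /*
         * Each pass computes "ticks", the whole seconds elapsed since the
         * previous pass (falling back to 1 if the monotonic clock is
         * unusable); check_path() uses it to age the per-path timers.
         */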
2171         while (1) {
2172                 struct timespec diff_time, start_time, end_time;
2173                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2174
2175                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2176                         start_time.tv_sec = 0;
2177                 if (start_time.tv_sec && last_time.tv_sec) {
2178                         timespecsub(&start_time, &last_time, &diff_time);
2179                         condlog(4, "tick (%lu.%06lu secs)",
2180                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2181                         last_time = start_time;
2182                         ticks = diff_time.tv_sec;
2183                 } else {
2184                         ticks = 1;
2185                         condlog(4, "tick (%d ticks)", ticks);
2186                 }
2187 #ifdef USE_SYSTEMD
2188                 if (use_watchdog)
2189                         sd_notify(0, "WATCHDOG=1");
2190 #endif
2191                 rc = set_config_state(DAEMON_RUNNING);
2192                 if (rc == ETIMEDOUT) {
2193                         condlog(4, "timeout waiting for DAEMON_IDLE");
2194                         continue;
2195                 }
2196
2197                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2198                 lock(&vecs->lock);
2199                 pthread_testcancel();
2200                 vector_foreach_slot (vecs->pathvec, pp, i) {
2201                         rc = check_path(vecs, pp, ticks);
2202                         if (rc < 0) {
2203                                 vector_del_slot(vecs->pathvec, i);
2204                                 free_path(pp);
2205                                 i--;
2206                         } else
2207                                 num_paths += rc;
2208                 }
2209                 lock_cleanup_pop(vecs->lock);
2210
2211                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2212                 lock(&vecs->lock);
2213                 pthread_testcancel();
2214                 defered_failback_tick(vecs->mpvec);
2215                 retry_count_tick(vecs->mpvec);
2216                 missing_uev_wait_tick(vecs);
2217                 ghost_delay_tick(vecs);
2218                 lock_cleanup_pop(vecs->lock);
2219
2220                 if (count)
2221                         count--;
2222                 else {
2223                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2224                         lock(&vecs->lock);
2225                         pthread_testcancel();
2226                         condlog(4, "map garbage collection");
2227                         mpvec_garbage_collector(vecs);
2228                         count = MAPGCINT;
2229                         lock_cleanup_pop(vecs->lock);
2230                 }
2231
2232                 diff_time.tv_nsec = 0;
2233                 if (start_time.tv_sec &&
2234                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2235                         timespecsub(&end_time, &start_time, &diff_time);
2236                         if (num_paths) {
2237                                 unsigned int max_checkint;
2238
2239                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2240                                         num_paths, num_paths > 1 ? "s" : "",
2241                                         diff_time.tv_sec,
2242                                         diff_time.tv_nsec / 1000);
2243                                 conf = get_multipath_config();
2244                                 max_checkint = conf->max_checkint;
2245                                 put_multipath_config(conf);
2246                                 if (diff_time.tv_sec > max_checkint)
2247                                         condlog(1, "path checkers took longer "
2248                                                 "than %lu seconds, consider "
2249                                                 "increasing max_polling_interval",
2250                                                 diff_time.tv_sec);
2251                         }
2252                 }
2253                 check_foreign();
2254                 post_config_state(DAEMON_IDLE);
2255                 conf = get_multipath_config();
2256                 strict_timing = conf->strict_timing;
2257                 put_multipath_config(conf);
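                /*
                 * Without strict_timing, simply sleep 1s between passes.
                 * With it, sleep only the remainder of the current second
                 * (diff_time holds how long this pass took), keeping the
                 * checker aligned to 1-second ticks.
                 */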
2258                 if (!strict_timing)
2259                         sleep(1);
2260                 else {
2261                         if (diff_time.tv_nsec) {
2262                                 diff_time.tv_sec = 0;
2263                                 diff_time.tv_nsec =
2264                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2265                         } else
2266                                 diff_time.tv_sec = 1;
2267
2268                         condlog(3, "waiting for %lu.%06lu secs",
2269                                 diff_time.tv_sec,
2270                                 diff_time.tv_nsec / 1000);
2271                         if (nanosleep(&diff_time, NULL) != 0) {
2272                                 condlog(3, "nanosleep failed with error %d",
2273                                         errno);
2274                                 conf = get_multipath_config();
2275                                 conf->strict_timing = 0;
2276                                 put_multipath_config(conf);
2277                                 break;
2278                         }
2279                 }
2280         }
2281         pthread_cleanup_pop(1);
2282         return NULL;
2283 }
2284
2285 int
2286 configure (struct vectors * vecs)
2287 {
2288         struct multipath * mpp;
2289         struct path * pp;
2290         vector mpvec;
2291         int i, ret;
2292         struct config *conf;
2293         static int force_reload = FORCE_RELOAD_WEAK;
2294
2295         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2296                 condlog(0, "couldn't allocate path vec in configure");
2297                 return 1;
2298         }
2299
2300         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2301                 condlog(0, "couldn't allocate multipath vec in configure");
2302                 return 1;
2303         }
2304
2305         if (!(mpvec = vector_alloc())) {
2306                 condlog(0, "couldn't allocate new maps vec in configure");
2307                 return 1;
2308         }
2309
2310         /*
2311          * probe for current path (from sysfs) and map (from dm) sets
2312          */
2313         ret = path_discovery(vecs->pathvec, DI_ALL);
2314         if (ret < 0) {
2315                 condlog(0, "configure failed at path discovery");
2316                 goto fail;
2317         }
2318
2319         vector_foreach_slot (vecs->pathvec, pp, i){
2320                 conf = get_multipath_config();
2321                 pthread_cleanup_push(put_multipath_config, conf);
2322                 if (filter_path(conf, pp) > 0){
2323                         vector_del_slot(vecs->pathvec, i);
2324                         free_path(pp);
2325                         i--;
2326                 }
2327                 else
2328                         pp->checkint = conf->checkint;
2329                 pthread_cleanup_pop(1);
2330         }
2331         if (map_discovery(vecs)) {
2332                 condlog(0, "configure failed at map discovery");
2333                 goto fail;
2334         }
2335
2336         /*
2337          * create new set of maps & push changed ones into dm
2338          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2339          * superfluous ACT_RELOAD ioctls. Later calls are done
2340          * with FORCE_RELOAD_YES.
2341          */
2342         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2343         if (force_reload == FORCE_RELOAD_WEAK)
2344                 force_reload = FORCE_RELOAD_YES;
2345         if (ret) {
2346                 condlog(0, "configure failed while coalescing paths");
2347                 goto fail;
2348         }
2349
2350         /*
2351          * may need to remove some maps which are no longer relevant
2352          * e.g., due to blacklist changes in conf file
2353          */
2354         if (coalesce_maps(vecs, mpvec)) {
2355                 condlog(0, "configure failed while coalescing maps");
2356                 goto fail;
2357         }
2358
2359         dm_lib_release();
2360
2361         sync_maps_state(mpvec);
2362         vector_foreach_slot(mpvec, mpp, i){
2363                 if (remember_wwid(mpp->wwid) == 1)
2364                         trigger_paths_udev_change(mpp, true);
2365                 update_map_pr(mpp);
2366         }
2367
2368         /*
2369          * purge dm of old maps
2370          */
2371         remove_maps(vecs);
2372
2373         /*
2374          * save new set of maps formed by considering current path state
2375          */
2376         vector_free(vecs->mpvec);
2377         vecs->mpvec = mpvec;
2378
2379         /*
2380          * start dm event waiter threads for these new maps
2381          */
2382         vector_foreach_slot(vecs->mpvec, mpp, i) {
2383                 if (wait_for_events(mpp, vecs)) {
2384                         remove_map(mpp, vecs, 1);
2385                         i--;
2386                         continue;
2387                 }
2388                 if (setup_multipath(vecs, mpp))
2389                         i--;
2390         }
2391         return 0;
2392
2393 fail:
2394         vector_free(mpvec);
2395         return 1;
2396 }
2397
2398 int
2399 need_to_delay_reconfig(struct vectors * vecs)
2400 {
2401         struct multipath *mpp;
2402         int i;
2403
2404         if (!VECTOR_SIZE(vecs->mpvec))
2405                 return 0;
2406
2407         vector_foreach_slot(vecs->mpvec, mpp, i) {
2408                 if (mpp->wait_for_udev)
2409                         return 1;
2410         }
2411         return 0;
2412 }
2413
2414 void rcu_free_config(struct rcu_head *head)
2415 {
2416         struct config *conf = container_of(head, struct config, rcu);
2417
2418         free_config(conf);
2419 }
2420
2421 int
2422 reconfigure (struct vectors * vecs)
2423 {
2424         struct config * old, *conf;
2425
2426         conf = load_config(DEFAULT_CONFIGFILE);
2427         if (!conf)
2428                 return 1;
2429
2430         /*
2431          * free old map and path vectors ... they use old conf state
2432          */
2433         if (VECTOR_SIZE(vecs->mpvec))
2434                 remove_maps_and_stop_waiters(vecs);
2435
2436         free_pathvec(vecs->pathvec, FREE_PATHS);
2437         vecs->pathvec = NULL;
2438         delete_all_foreign();
2439
2440         /* Re-read any timezone changes */
2441         tzset();
2442
2443         dm_tgt_version(conf->version, TGT_MPATH);
2444         if (verbosity)
2445                 conf->verbosity = verbosity;
2446         if (bindings_read_only)
2447                 conf->bindings_read_only = bindings_read_only;
2448         uxsock_timeout = conf->uxsock_timeout;
2449
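        /*
         * Publish the new config via RCU: readers holding a reference
         * from get_multipath_config() keep using the old struct until
         * they drop it; it is freed in the RCU callback once all
         * readers are done.
         */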
2450         old = rcu_dereference(multipath_conf);
2451         rcu_assign_pointer(multipath_conf, conf);
2452         call_rcu(&old->rcu, rcu_free_config);
2453
2454         configure(vecs);
2455
2456
2457         return 0;
2458 }
2459
2460 static struct vectors *
2461 init_vecs (void)
2462 {
2463         struct vectors * vecs;
2464
2465         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2466
2467         if (!vecs)
2468                 return NULL;
2469
2470         pthread_mutex_init(&vecs->lock.mutex, NULL);
2471
2472         return vecs;
2473 }
2474
2475 static void *
2476 signal_set(int signo, void (*func) (int))
2477 {
2478         int r;
2479         struct sigaction sig;
2480         struct sigaction osig;
2481
2482         sig.sa_handler = func;
2483         sigemptyset(&sig.sa_mask);
2484         sig.sa_flags = 0;
2485
2486         r = sigaction(signo, &sig, &osig);
2487
2488         if (r < 0)
2489                 return (SIG_ERR);
2490         else
2491                 return (osig.sa_handler);
2492 }
2493
2494 void
2495 handle_signals(bool nonfatal)
2496 {
2497         if (exit_sig) {
2498                 condlog(2, "exit (signal)");
2499                 exit_sig = 0;
2500                 exit_daemon();
2501         }
2502         if (!nonfatal)
2503                 return;
2504         if (reconfig_sig) {
2505                 condlog(2, "reconfigure (signal)");
2506                 set_config_state(DAEMON_CONFIGURE);
2507         }
2508         if (log_reset_sig) {
2509                 condlog(2, "reset log (signal)");
2510                 if (logsink == 1)
2511                         log_thread_reset();
2512         }
2513         reconfig_sig = 0;
2514         log_reset_sig = 0;
2515 }
2516
2517 static void
2518 sighup (int sig)
2519 {
2520         reconfig_sig = 1;
2521 }
2522
2523 static void
2524 sigend (int sig)
2525 {
2526         exit_sig = 1;
2527 }
2528
2529 static void
2530 sigusr1 (int sig)
2531 {
2532         log_reset_sig = 1;
2533 }
2534
2535 static void
2536 sigusr2 (int sig)
2537 {
2538         condlog(3, "SIGUSR2 received");
2539 }
2540
2541 static void
2542 signal_init(void)
2543 {
2544         sigset_t set;
2545
2546         /* block all signals */
2547         sigfillset(&set);
2548         /* SIGPIPE occurs if logging fails */
2549         sigdelset(&set, SIGPIPE);
2550         pthread_sigmask(SIG_SETMASK, &set, NULL);
2551
2552         /* Other signals will be unblocked in the uxlsnr thread */
2553         signal_set(SIGHUP, sighup);
2554         signal_set(SIGUSR1, sigusr1);
2555         signal_set(SIGUSR2, sigusr2);
2556         signal_set(SIGINT, sigend);
2557         signal_set(SIGTERM, sigend);
2558         signal_set(SIGPIPE, sigend);
2559 }
2560
2561 static void
2562 setscheduler (void)
2563 {
2564         int res;
2565         static struct sched_param sched_param = {
2566                 .sched_priority = 99
2567         };
2568
2569         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2570
2571         if (res == -1)
2572                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2573         return;
2574 }
2575
2576 static void
2577 set_oom_adj (void)
2578 {
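/*
 * Prefer the newer /proc/self/oom_score_adj interface and fall back to
 * the legacy /proc/self/oom_adj if only that exists; do nothing when
 * systemd already applied OOMScoreAdjust.
 */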
2579 #ifdef OOM_SCORE_ADJ_MIN
2580         int retry = 1;
2581         char *file = "/proc/self/oom_score_adj";
2582         int score = OOM_SCORE_ADJ_MIN;
2583 #else
2584         int retry = 0;
2585         char *file = "/proc/self/oom_adj";
2586         int score = OOM_ADJUST_MIN;
2587 #endif
2588         FILE *fp;
2589         struct stat st;
2590         char *envp;
2591
2592         envp = getenv("OOMScoreAdjust");
2593         if (envp) {
2594                 condlog(3, "Using systemd provided OOMScoreAdjust");
2595                 return;
2596         }
2597         do {
2598                 if (stat(file, &st) == 0){
2599                         fp = fopen(file, "w");
2600                         if (!fp) {
2601                                 condlog(0, "couldn't fopen %s : %s", file,
2602                                         strerror(errno));
2603                                 return;
2604                         }
2605                         fprintf(fp, "%i", score);
2606                         fclose(fp);
2607                         return;
2608                 }
2609                 if (errno != ENOENT) {
2610                         condlog(0, "couldn't stat %s : %s", file,
2611                                 strerror(errno));
2612                         return;
2613                 }
2614 #ifdef OOM_ADJUST_MIN
2615                 file = "/proc/self/oom_adj";
2616                 score = OOM_ADJUST_MIN;
2617 #else
2618                 retry = 0;
2619 #endif
2620         } while (retry--);
2621         condlog(0, "couldn't adjust oom score");
2622 }
2623
2624 static int
2625 child (void * param)
2626 {
2627         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2628         pthread_attr_t log_attr, misc_attr, uevent_attr;
2629         struct vectors * vecs;
2630         struct multipath * mpp;
2631         int i;
2632 #ifdef USE_SYSTEMD
2633         unsigned long checkint;
2634         int startup_done = 0;
2635 #endif
2636         int rc;
2637         int pid_fd = -1;
2638         struct config *conf;
2639         char *envp;
2640         int queue_without_daemon;
2641
2642         mlockall(MCL_CURRENT | MCL_FUTURE);
2643         signal_init();
2644         rcu_init();
2645
2646         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2647         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2648         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2649         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2650
2651         if (logsink == 1) {
2652                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2653                 log_thread_start(&log_attr);
2654                 pthread_attr_destroy(&log_attr);
2655         }
2656         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2657         if (pid_fd < 0) {
2658                 condlog(1, "failed to create pidfile");
2659                 if (logsink == 1)
2660                         log_thread_stop();
2661                 exit(1);
2662         }
2663
2664         post_config_state(DAEMON_START);
2665
2666         condlog(2, "--------start up--------");
2667         condlog(2, "read " DEFAULT_CONFIGFILE);
2668
2669         conf = load_config(DEFAULT_CONFIGFILE);
2670         if (!conf)
2671                 goto failed;
2672
2673         if (verbosity)
2674                 conf->verbosity = verbosity;
2675         if (bindings_read_only)
2676                 conf->bindings_read_only = bindings_read_only;
2677         uxsock_timeout = conf->uxsock_timeout;
2678         rcu_assign_pointer(multipath_conf, conf);
2679         if (init_checkers(conf->multipath_dir)) {
2680                 condlog(0, "failed to initialize checkers");
2681                 goto failed;
2682         }
2683         if (init_prio(conf->multipath_dir)) {
2684                 condlog(0, "failed to initialize prioritizers");
2685                 goto failed;
2686         }
2687         /* Failing this is non-fatal */
2688
2689         init_foreign(conf->multipath_dir);
2690
2691         if (poll_dmevents)
2692                 poll_dmevents = dmevent_poll_supported();
2693         setlogmask(LOG_UPTO(conf->verbosity + 3));
2694
2695         envp = getenv("LimitNOFILE");
2696
2697         if (envp)
2698                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2699         else
2700                 set_max_fds(conf->max_fds);
2701
2702         vecs = gvecs = init_vecs();
2703         if (!vecs)
2704                 goto failed;
2705
2706         setscheduler();
2707         set_oom_adj();
2708
2709 #ifdef USE_SYSTEMD
2710         envp = getenv("WATCHDOG_USEC");
2711         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2712                 /* Value is in microseconds */
2713                 conf->max_checkint = checkint / 1000000;
2714                 /* Rescale checkint */
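                /*
                 * e.g. WATCHDOG_USEC=30000000 gives max_checkint 30s and,
                 * assuming the configured checkint is smaller, checkint
                 * 30 / 4 = 7s.
                 */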
2715                 if (conf->checkint > conf->max_checkint)
2716                         conf->checkint = conf->max_checkint;
2717                 else
2718                         conf->checkint = conf->max_checkint / 4;
2719                 condlog(3, "enabling watchdog, interval %d max %d",
2720                         conf->checkint, conf->max_checkint);
2721                 use_watchdog = conf->checkint;
2722         }
2723 #endif
2724         /*
2725          * Startup done; drop the local config pointer (now owned via multipath_conf/RCU)
2726          */
2727         conf = NULL;
2728
2729         /*
2730          * Signal start of configuration
2731          */
2732         post_config_state(DAEMON_CONFIGURE);
2733
2734         init_path_check_interval(vecs);
2735
2736         if (poll_dmevents) {
2737                 if (init_dmevent_waiter(vecs)) {
2738                         condlog(0, "failed to allocate dmevents waiter info");
2739                         goto failed;
2740                 }
2741                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2742                                          wait_dmevents, NULL))) {
2743                         condlog(0, "failed to create dmevent waiter thread: %d",
2744                                 rc);
2745                         goto failed;
2746                 }
2747         }
2748
2749         /*
2750          * Start uevent listener early to catch events
2751          */
2752         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2753                 condlog(0, "failed to create uevent thread: %d", rc);
2754                 goto failed;
2755         }
2756         pthread_attr_destroy(&uevent_attr);
2757         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2758                 condlog(0, "failed to create cli listener: %d", rc);
2759                 goto failed;
2760         }
2761
2762         /*
2763          * start threads
2764          */
2765         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2766                 condlog(0,"failed to create checker loop thread: %d", rc);
2767                 goto failed;
2768         }
2769         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2770                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2771                 goto failed;
2772         }
2773         pthread_attr_destroy(&misc_attr);
2774
2775         while (running_state != DAEMON_SHUTDOWN) {
2776                 pthread_cleanup_push(config_cleanup, NULL);
2777                 pthread_mutex_lock(&config_lock);
2778                 if (running_state != DAEMON_CONFIGURE &&
2779                     running_state != DAEMON_SHUTDOWN) {
2780                         pthread_cond_wait(&config_cond, &config_lock);
2781                 }
2782                 pthread_cleanup_pop(1);
2783                 if (running_state == DAEMON_CONFIGURE) {
2784                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2785                         lock(&vecs->lock);
2786                         pthread_testcancel();
2787                         if (!need_to_delay_reconfig(vecs)) {
2788                                 reconfigure(vecs);
2789                         } else {
2790                                 conf = get_multipath_config();
2791                                 conf->delayed_reconfig = 1;
2792                                 put_multipath_config(conf);
2793                         }
2794                         lock_cleanup_pop(vecs->lock);
2795                         post_config_state(DAEMON_IDLE);
2796 #ifdef USE_SYSTEMD
2797                         if (!startup_done) {
2798                                 sd_notify(0, "READY=1");
2799                                 startup_done = 1;
2800                         }
2801 #endif
2802                 }
2803         }
2804
2805         lock(&vecs->lock);
2806         conf = get_multipath_config();
2807         queue_without_daemon = conf->queue_without_daemon;
2808         put_multipath_config(conf);
2809         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2810                 vector_foreach_slot(vecs->mpvec, mpp, i)
2811                         dm_queue_if_no_path(mpp->alias, 0);
2812         remove_maps_and_stop_waiters(vecs);
2813         unlock(&vecs->lock);
2814
2815         pthread_cancel(check_thr);
2816         pthread_cancel(uevent_thr);
2817         pthread_cancel(uxlsnr_thr);
2818         pthread_cancel(uevq_thr);
2819         if (poll_dmevents)
2820                 pthread_cancel(dmevent_thr);
2821
2822         pthread_join(check_thr, NULL);
2823         pthread_join(uevent_thr, NULL);
2824         pthread_join(uxlsnr_thr, NULL);
2825         pthread_join(uevq_thr, NULL);
2826         if (poll_dmevents)
2827                 pthread_join(dmevent_thr, NULL);
2828
2829         stop_io_err_stat_thread();
2830
2831         lock(&vecs->lock);
2832         free_pathvec(vecs->pathvec, FREE_PATHS);
2833         vecs->pathvec = NULL;
2834         unlock(&vecs->lock);
2835
2836         pthread_mutex_destroy(&vecs->lock.mutex);
2837         FREE(vecs);
2838         vecs = NULL;
2839
2840         cleanup_foreign();
2841         cleanup_checkers();
2842         cleanup_prio();
2843         if (poll_dmevents)
2844                 cleanup_dmevent_waiter();
2845
2846         dm_lib_release();
2847         dm_lib_exit();
2848
2849         /* We're done here */
2850         condlog(3, "unlink pidfile");
2851         unlink(DEFAULT_PIDFILE);
2852
2853         condlog(2, "--------shut down-------");
2854
2855         if (logsink == 1)
2856                 log_thread_stop();
2857
2858         /*
2859          * Freeing config must be done after condlog() and dm_lib_exit(),
2860          * because logging functions like dlog() and dm_write_log()
2861          * reference the config.
2862          */
2863         conf = rcu_dereference(multipath_conf);
2864         rcu_assign_pointer(multipath_conf, NULL);
2865         call_rcu(&conf->rcu, rcu_free_config);
2866         udev_unref(udev);
2867         udev = NULL;
2868         pthread_attr_destroy(&waiter_attr);
2869         pthread_attr_destroy(&io_err_stat_attr);
2870 #ifdef _DEBUG_
2871         dbg_free_final(NULL);
2872 #endif
2873
2874 #ifdef USE_SYSTEMD
2875         sd_notify(0, "ERRNO=0");
2876 #endif
2877         exit(0);
2878
2879 failed:
2880 #ifdef USE_SYSTEMD
2881         sd_notify(0, "ERRNO=1");
2882 #endif
2883         if (pid_fd >= 0)
2884                 close(pid_fd);
2885         exit(1);
2886 }
2887
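/*
 * Classic double fork: the parent gets the child's pid back (and exits in
 * main()); the first child calls setsid() to detach from the controlling
 * terminal and forks again; the second child, which can no longer acquire
 * a terminal, redirects stdio to /dev/null and returns 0 to become the
 * daemon.
 */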
2888 static int
2889 daemonize(void)
2890 {
2891         int pid;
2892         int dev_null_fd;
2893
2894         if ((pid = fork()) < 0) {
2895                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2896                 return -1;
2897         }
2898         else if (pid != 0)
2899                 return pid;
2900
2901         setsid();
2902
2903         if ((pid = fork()) < 0)
2904                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2905         else if (pid != 0)
2906                 _exit(0);
2907
2908         if (chdir("/") < 0)
2909                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2910
2911         dev_null_fd = open("/dev/null", O_RDWR);
2912         if (dev_null_fd < 0){
2913                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2914                         strerror(errno));
2915                 _exit(0);
2916         }
2917
2918         close(STDIN_FILENO);
2919         if (dup(dev_null_fd) < 0) {
2920                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2921                         strerror(errno));
2922                 _exit(0);
2923         }
2924         close(STDOUT_FILENO);
2925         if (dup(dev_null_fd) < 0) {
2926                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2927                         strerror(errno));
2928                 _exit(0);
2929         }
2930         close(STDERR_FILENO);
2931         if (dup(dev_null_fd) < 0) {
2932                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2933                         strerror(errno));
2934                 _exit(0);
2935         }
2936         close(dev_null_fd);
2937         daemon_pid = getpid();
2938         return 0;
2939 }
2940
2941 int
2942 main (int argc, char *argv[])
2943 {
2944         extern char *optarg;
2945         extern int optind;
2946         int arg;
2947         int err;
2948         int foreground = 0;
2949         struct config *conf;
2950
2951         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2952                                    "Manipulated through RCU");
2953         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2954                 "Suppress complaints about unprotected running_state reads");
2955         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2956                 "Suppress complaints about this scalar variable");
2957
2958         logsink = 1;
2959
2960         if (getuid() != 0) {
2961                 fprintf(stderr, "need to be root\n");
2962                 exit(1);
2963         }
2964
2965         /* make sure we don't lock any path */
2966         if (chdir("/") < 0)
2967                 fprintf(stderr, "can't chdir to root directory : %s\n",
2968                         strerror(errno));
2969         umask(umask(077) | 022);
2970
2971         pthread_cond_init_mono(&config_cond);
2972
2973         udev = udev_new();
2974         libmp_udev_set_sync_support(0);
2975
2976         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2977                 switch(arg) {
2978                 case 'd':
2979                         foreground = 1;
2980                         if (logsink > 0)
2981                                 logsink = 0;
2982                         //debug=1; /* ### comment me out ### */
2983                         break;
2984                 case 'v':
2985                         if (strlen(optarg) > 1 ||
2986                             !isdigit(optarg[0]))
2987                                 exit(1);
2988
2989                         verbosity = atoi(optarg);
2990                         break;
2991                 case 's':
2992                         logsink = -1;
2993                         break;
2994                 case 'k':
2995                         logsink = 0;
2996                         conf = load_config(DEFAULT_CONFIGFILE);
2997                         if (!conf)
2998                                 exit(1);
2999                         if (verbosity)
3000                                 conf->verbosity = verbosity;
3001                         uxsock_timeout = conf->uxsock_timeout;
3002                         err = uxclnt(optarg, uxsock_timeout + 100);
3003                         free_config(conf);
3004                         return err;
3005                 case 'B':
3006                         bindings_read_only = 1;
3007                         break;
3008                 case 'n':
3009                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3010                         break;
3011                 case 'w':
3012                         poll_dmevents = 0;
3013                         break;
3014                 default:
3015                         fprintf(stderr, "Invalid argument '-%c'\n",
3016                                 optopt);
3017                         exit(1);
3018                 }
3019         }
3020         if (optind < argc) {
3021                 char cmd[CMDSIZE];
3022                 char * s = cmd;
3023                 char * c = s;
3024
3025                 logsink = 0;
3026                 conf = load_config(DEFAULT_CONFIGFILE);
3027                 if (!conf)
3028                         exit(1);
3029                 if (verbosity)
3030                         conf->verbosity = verbosity;
3031                 uxsock_timeout = conf->uxsock_timeout;
3032                 memset(cmd, 0x0, CMDSIZE);
3033                 while (optind < argc) {
3034                         if (strchr(argv[optind], ' '))
3035                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3036                         else
3037                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3038                         optind++;
3039                 }
3040                 c += snprintf(c, s + CMDSIZE - c, "\n");
3041                 err = uxclnt(s, uxsock_timeout + 100);
3042                 free_config(conf);
3043                 return err;
3044         }
3045
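             /*
              * In foreground mode keep stdout unbuffered when it is not a
              * tty, so log output appears immediately when redirected.
              * Otherwise daemonize(): it returns <0 on error, >0 in the
              * parent (which exits) and 0 in the child that runs child().
              */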
3046         if (foreground) {
3047                 if (!isatty(fileno(stdout)))
3048                         setbuf(stdout, NULL);
3049                 err = 0;
3050                 daemon_pid = getpid();
3051         } else
3052                 err = daemonize();
3053
3054         if (err < 0)
3055                 /* error */
3056                 exit(1);
3057         else if (err > 0)
3058                 /* parent dies */
3059                 exit(0);
3060         else
3061                 /* child lives */
3062                 return (child(NULL));
3063 }
3064
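     /*
      * Runs in its own thread (see mpath_pr_event_handle below).  It issues
      * a PERSISTENT RESERVE IN / READ KEYS on the path, looks for the map's
      * configured reservation_key among the returned registrations, and if
      * the key is present re-registers it on this path with PERSISTENT
      * RESERVE OUT / REGISTER AND IGNORE EXISTING KEY.
      */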
3065 void * mpath_pr_event_handler_fn(void *pathp)
3066 {
3067         struct multipath * mpp;
3068         int i, ret, isFound;
3069         struct path * pp = (struct path *)pathp;
3070         struct prout_param_descriptor *param;
3071         struct prin_resp *resp;
3072
3073         rcu_register_thread();
3074         mpp = pp->mpp;
3075
3076         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3077         if (!resp) {
3078                 condlog(0, "%s: alloc failed for prin response", pp->dev);
3079                 goto out;
3080         }
3081
3082         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3083         if (ret != MPATH_PR_SUCCESS)
3084         {
3085                 condlog(0, "%s: pr in read keys service action failed. Error=%d", pp->dev, ret);
3086                 goto out;
3087         }
3088
3089         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
3090                         resp->prin_descriptor.prin_readkeys.additional_length);
3091
3092         if (resp->prin_descriptor.prin_readkeys.additional_length == 0)
3093         {
3094                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3095                 ret = MPATH_PR_SUCCESS;
3096                 goto out;
3097         }
3098         condlog(2, "Multipath reservation_key: 0x%" PRIx64,
3099                 get_be64(mpp->reservation_key));
3100
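             /*
              * READ KEYS returns a list of 8-byte registered keys; walk it
              * and check whether the map's reservation_key is among them.
              */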
3101         isFound = 0;
3102         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++)
3103         {
3104                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
3105                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8, -1);
3106                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3107                 {
3108                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3109                         isFound = 1;
3110                         break;
3111                 }
3112         }
3113         if (!isFound)
3114         {
3115                 condlog(0, "%s: either device is not registered or host is not "
3116                         "authorised for registration. Skipping path", pp->dev);
3117                 ret = MPATH_PR_OTHER;
3118                 goto out;
3119         }
3120
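             /*
              * The key is registered elsewhere on this map, so register it
              * on this path too, using REGISTER AND IGNORE EXISTING KEY so
              * any stale registration on the path is simply replaced.
              */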
3121         param = calloc(1, sizeof(struct prout_param_descriptor));
3122         if (!param) goto out;
3123         param->sa_flags = mpp->sa_flags;
3124         memcpy(param->sa_key, &mpp->reservation_key, 8);
3125         param->num_transportid = 0;
3126
3127         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3128
3129         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3130         if (ret != MPATH_PR_SUCCESS)
3131         {
3132                 condlog(0, "%s: Reservation registration failed. Error: %d", pp->dev, ret);
3133         }
3134         mpp->prflag = 1;
3135
3136         free(param);
3137 out:
3138         if (resp)
3139                 free(resp);
3140         rcu_unregister_thread();
3141         return NULL;
3142 }
3143
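     /*
      * Spawn the PR registration handler for a path.  Only SCSI paths on
      * maps with a reservation_key configured are handled; the worker
      * thread is created and then joined immediately, so the call is
      * effectively synchronous for the caller.
      */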
3144 int mpath_pr_event_handle(struct path *pp)
3145 {
3146         pthread_t thread;
3147         int rc;
3148         pthread_attr_t attr;
3149         struct multipath * mpp;
3150
3151         if (pp->bus != SYSFS_BUS_SCSI)
3152                 return 0;
3153
3154         mpp = pp->mpp;
3155
3156         if (!get_be64(mpp->reservation_key))
3157                 return -1;
3158
3159         pthread_attr_init(&attr);
3160         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3161
3162         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
3163         if (rc) {
3164                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3165                 return -1;
3166         }
3167         pthread_attr_destroy(&attr);
3168         pthread_join(thread, NULL);
3169         return 0;
3170 }