1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69 #include "uxsock.h"
70
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
73
74 #include "prioritizers/alua_rtpg.h"
75
76 #include "main.h"
77 #include "pidfile.h"
78 #include "uxlsnr.h"
79 #include "uxclnt.h"
80 #include "cli.h"
81 #include "cli_handlers.h"
82 #include "lock.h"
83 #include "waiter.h"
84 #include "dmevents.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
93 #define LOG_MSG(lvl, verb, pp)                                  \
94 do {                                                            \
95         if (lvl <= verb) {                                      \
96                 if (pp->offline)                                \
97                         condlog(lvl, "%s: %s - path offline",   \
98                                 pp->mpp->alias, pp->dev);       \
99                 else  {                                         \
100                         const char *__m =                       \
101                                 checker_message(&pp->checker);  \
102                                                                 \
103                         if (strlen(__m))                              \
104                                 condlog(lvl, "%s: %s - %s checker%s", \
105                                         pp->mpp->alias,               \
106                                         pp->dev,                      \
107                                         checker_name(&pp->checker),   \
108                                         __m);                         \
109                 }                                                     \
110         }                                                             \
111 } while(0)
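/*
 * Illustrative use only (names assumed to be in scope at the call site):
 *
 *	LOG_MSG(2, verbosity, pp);
 *
 * logs "alias: dev - <checker name> checker<message>" at level 2, or
 * "alias: dev - path offline" if pp->offline is set.
 */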
112
113 struct mpath_event_param
114 {
115         char * devname;
116         struct multipath *mpp;
117 };
118
119 int logsink;
120 int uxsock_timeout;
121 int verbosity;
122 int bindings_read_only;
123 int ignore_new_devs;
124 #ifdef NO_DMEVENTS_POLL
125 int poll_dmevents = 0;
126 #else
127 int poll_dmevents = 1;
128 #endif
129 enum daemon_status running_state = DAEMON_INIT;
130 pid_t daemon_pid;
131 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
132 pthread_cond_t config_cond;
133
134 /*
135  * global copy of vecs for use in sig handlers
136  */
137 struct vectors * gvecs;
138
139 struct udev * udev;
140
141 struct config *multipath_conf;
142
143 /* Local variables */
144 static volatile sig_atomic_t exit_sig;
145 static volatile sig_atomic_t reconfig_sig;
146 static volatile sig_atomic_t log_reset_sig;
147
148 const char *
149 daemon_status(void)
150 {
151         switch (running_state) {
152         case DAEMON_INIT:
153                 return "init";
154         case DAEMON_START:
155                 return "startup";
156         case DAEMON_CONFIGURE:
157                 return "configure";
158         case DAEMON_IDLE:
159                 return "idle";
160         case DAEMON_RUNNING:
161                 return "running";
162         case DAEMON_SHUTDOWN:
163                 return "shutdown";
164         }
165         return NULL;
166 }
167
168 /*
169  * I love you too, systemd ...
170  */
171 const char *
172 sd_notify_status(void)
173 {
174         switch (running_state) {
175         case DAEMON_INIT:
176                 return "STATUS=init";
177         case DAEMON_START:
178                 return "STATUS=startup";
179         case DAEMON_CONFIGURE:
180                 return "STATUS=configure";
181         case DAEMON_IDLE:
182         case DAEMON_RUNNING:
183                 return "STATUS=up";
184         case DAEMON_SHUTDOWN:
185                 return "STATUS=shutdown";
186         }
187         return NULL;
188 }
189
190 #ifdef USE_SYSTEMD
191 static void do_sd_notify(enum daemon_status old_state)
192 {
193         /*
194          * Checkerloop switches back and forth between idle and running state.
195          * No need to tell systemd each time.
196          * These notifications cause a lot of overhead on dbus.
197          */
198         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
199             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
200                 return;
201         sd_notify(0, sd_notify_status());
202 }
203 #endif
204
205 static void config_cleanup(void *arg)
206 {
207         pthread_mutex_unlock(&config_lock);
208 }
209
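/*
 * State transition helper. The caller must hold config_lock;
 * post_config_state() below is the locking wrapper. Every change is
 * broadcast on config_cond so waiters (e.g. set_config_state() and
 * uev_trigger()) wake up. Once DAEMON_SHUTDOWN is reached, no further
 * transitions are made.
 */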
210 static void __post_config_state(enum daemon_status state)
211 {
212         if (state != running_state && running_state != DAEMON_SHUTDOWN) {
213                 enum daemon_status old_state = running_state;
214
215                 running_state = state;
216                 pthread_cond_broadcast(&config_cond);
217 #ifdef USE_SYSTEMD
218                 do_sd_notify(old_state);
219 #endif
220         }
221 }
222
223 void post_config_state(enum daemon_status state)
224 {
225         pthread_mutex_lock(&config_lock);
226         pthread_cleanup_push(config_cleanup, NULL);
227         __post_config_state(state);
228         pthread_cleanup_pop(1);
229 }
230
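/*
 * Request a state change from a worker thread. If the daemon is neither
 * idle nor already in the requested state, wait on config_cond for up to
 * one second for the current transition to finish. Returns 0 on success,
 * ETIMEDOUT if the daemon stayed busy, or EINVAL if it is shutting down.
 */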
231 int set_config_state(enum daemon_status state)
232 {
233         int rc = 0;
234
235         pthread_cleanup_push(config_cleanup, NULL);
236         pthread_mutex_lock(&config_lock);
237         if (running_state != state) {
238                 enum daemon_status old_state = running_state;
239
240                 if (running_state == DAEMON_SHUTDOWN)
241                         rc = EINVAL;
242                 else if (running_state != DAEMON_IDLE) {
243                         struct timespec ts;
244
245                         clock_gettime(CLOCK_MONOTONIC, &ts);
246                         ts.tv_sec += 1;
247                         rc = pthread_cond_timedwait(&config_cond,
248                                                     &config_lock, &ts);
249                 }
250                 if (!rc) {
251                         running_state = state;
252                         pthread_cond_broadcast(&config_cond);
253 #ifdef USE_SYSTEMD
254                         do_sd_notify(old_state);
255 #endif
256                 }
257         }
258         pthread_cleanup_pop(1);
259         return rc;
260 }
261
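/*
 * RCU-protected access to the global configuration. Typical pattern, as
 * used throughout this file:
 *
 *	conf = get_multipath_config();
 *	pthread_cleanup_push(put_multipath_config, conf);
 *	... use conf ...
 *	pthread_cleanup_pop(1);
 *
 * put_multipath_config() only drops the RCU read lock, which is why it is
 * also usable directly as a pthread cleanup handler.
 */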
262 struct config *get_multipath_config(void)
263 {
264         rcu_read_lock();
265         return rcu_dereference(multipath_conf);
266 }
267
268 void put_multipath_config(void *arg)
269 {
270         rcu_read_unlock();
271 }
272
273 static int
274 need_switch_pathgroup (struct multipath * mpp, int refresh)
275 {
276         struct pathgroup * pgp;
277         struct path * pp;
278         unsigned int i, j;
279         struct config *conf;
280         int bestpg;
281
282         if (!mpp)
283                 return 0;
284
285         /*
286          * Refresh path priority values
287          */
288         if (refresh) {
289                 vector_foreach_slot (mpp->pg, pgp, i) {
290                         vector_foreach_slot (pgp->paths, pp, j) {
291                                 conf = get_multipath_config();
292                                 pthread_cleanup_push(put_multipath_config,
293                                                      conf);
294                                 pathinfo(pp, conf, DI_PRIO);
295                                 pthread_cleanup_pop(1);
296                         }
297                 }
298         }
299
300         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
301                 return 0;
302
303         bestpg = select_path_group(mpp);
304         if (mpp->pgfailback == -FAILBACK_MANUAL)
305                 return 0;
306
307         mpp->bestpg = bestpg;
308         if (mpp->bestpg != mpp->nextpg)
309                 return 1;
310
311         return 0;
312 }
313
314 static void
315 switch_pathgroup (struct multipath * mpp)
316 {
317         mpp->stat_switchgroup++;
318         dm_switchgroup(mpp->alias, mpp->bestpg);
319         condlog(2, "%s: switch to path group #%i",
320                  mpp->alias, mpp->bestpg);
321 }
322
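/*
 * Arrange to receive device-mapper events for this map: either register it
 * with the central dmevent polling code (poll_dmevents) or start a
 * dedicated per-map waiter thread.
 */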
323 static int
324 wait_for_events(struct multipath *mpp, struct vectors *vecs)
325 {
326         if (poll_dmevents)
327                 return watch_dmevents(mpp->alias);
328         else
329                 return start_waiter_thread(mpp, vecs);
330 }
331
332 static void
333 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
334 {
335         /* devices are automatically removed by the dmevent polling code,
336          * so they don't need to be manually removed here */
337         if (!poll_dmevents)
338                 stop_waiter_thread(mpp, vecs);
339         remove_map(mpp, vecs, PURGE_VEC);
340 }
341
342 static void
343 remove_maps_and_stop_waiters(struct vectors *vecs)
344 {
345         int i;
346         struct multipath * mpp;
347
348         if (!vecs)
349                 return;
350
351         if (!poll_dmevents) {
352                 vector_foreach_slot(vecs->mpvec, mpp, i)
353                         stop_waiter_thread(mpp, vecs);
354         }
355         else
356                 unwatch_all_dmevents();
357
358         remove_maps(vecs);
359 }
360
361 static void
362 set_multipath_wwid (struct multipath * mpp)
363 {
364         if (strlen(mpp->wwid))
365                 return;
366
367         dm_get_uuid(mpp->alias, mpp->wwid);
368 }
369
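/*
 * Bring the map's queue_if_no_path feature in line with the configured
 * no_path_retry policy: enable or disable queueing as needed for the
 * QUEUE/FAIL settings, and for a numeric retry count keep queueing while
 * active paths remain, entering recovery mode once the last path is gone.
 */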
370 static void set_no_path_retry(struct multipath *mpp)
371 {
372         char is_queueing = 0;
373
374         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
375         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
376                 is_queueing = 1;
377
378         switch (mpp->no_path_retry) {
379         case NO_PATH_RETRY_UNDEF:
380                 break;
381         case NO_PATH_RETRY_FAIL:
382                 if (is_queueing)
383                         dm_queue_if_no_path(mpp->alias, 0);
384                 break;
385         case NO_PATH_RETRY_QUEUE:
386                 if (!is_queueing)
387                         dm_queue_if_no_path(mpp->alias, 1);
388                 break;
389         default:
390                 if (mpp->nr_active > 0) {
391                         mpp->retry_tick = 0;
392                         dm_queue_if_no_path(mpp->alias, 1);
393                 } else if (is_queueing && mpp->retry_tick == 0)
394                         enter_recovery_mode(mpp);
395                 break;
396         }
397 }
398
399 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
400                       int reset)
401 {
402         if (dm_get_info(mpp->alias, &mpp->dmi)) {
403                 /* Error accessing table */
404                 condlog(3, "%s: cannot access table", mpp->alias);
405                 goto out;
406         }
407
408         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
409                 condlog(0, "%s: failed to setup multipath", mpp->alias);
410                 goto out;
411         }
412
413         if (reset) {
414                 set_no_path_retry(mpp);
415                 if (VECTOR_SIZE(mpp->paths) != 0)
416                         dm_cancel_deferred_remove(mpp);
417         }
418
419         return 0;
420 out:
421         remove_map_and_stop_waiter(mpp, vecs);
422         return 1;
423 }
424
425 int update_multipath (struct vectors *vecs, char *mapname, int reset)
426 {
427         struct multipath *mpp;
428         struct pathgroup  *pgp;
429         struct path *pp;
430         int i, j;
431
432         mpp = find_mp_by_alias(vecs->mpvec, mapname);
433
434         if (!mpp) {
435                 condlog(3, "%s: multipath map not found", mapname);
436                 return 2;
437         }
438
439         if (__setup_multipath(vecs, mpp, reset))
440                 return 1; /* mpp freed in setup_multipath */
441
442         /*
443          * compare checkers states with DM states
444          */
445         vector_foreach_slot (mpp->pg, pgp, i) {
446                 vector_foreach_slot (pgp->paths, pp, j) {
447                         if (pp->dmstate != PSTATE_FAILED)
448                                 continue;
449
450                         if (pp->state != PATH_DOWN) {
451                                 struct config *conf;
452                                 int oldstate = pp->state;
453                                 int checkint;
454
455                                 conf = get_multipath_config();
456                                 checkint = conf->checkint;
457                                 put_multipath_config(conf);
458                                 condlog(2, "%s: mark as failed", pp->dev);
459                                 mpp->stat_path_failures++;
460                                 pp->state = PATH_DOWN;
461                                 if (oldstate == PATH_UP ||
462                                     oldstate == PATH_GHOST)
463                                         update_queue_mode_del_path(mpp);
464
465                                 /*
466                                  * if opportune,
467                                  * schedule the next check earlier
468                                  */
469                                 if (pp->tick > checkint)
470                                         pp->tick = checkint;
471                         }
472                 }
473         }
474         return 0;
475 }
476
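/*
 * Re-sync an existing (or newly discovered) map with the daemon's path
 * list: re-adopt paths, rebuild the table parameters and reload them via
 * domap(), retrying the reload up to three times. Returns 1 if the map had
 * to be removed (creation failure for a new map, or setup_multipath()
 * failure), 0 otherwise.
 */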
477 static int
478 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
479 {
480         int retries = 3;
481         char params[PARAMS_SIZE] = {0};
482
483 retry:
484         condlog(4, "%s: updating new map", mpp->alias);
485         if (adopt_paths(vecs->pathvec, mpp)) {
486                 condlog(0, "%s: failed to adopt paths for new map update",
487                         mpp->alias);
488                 retries = -1;
489                 goto fail;
490         }
491         verify_paths(mpp, vecs);
492         mpp->action = ACT_RELOAD;
493
494         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
495                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
496                 retries = -1;
497                 goto fail;
498         }
499         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
500                 condlog(0, "%s: map_update sleep", mpp->alias);
501                 sleep(1);
502                 goto retry;
503         }
504         dm_lib_release();
505
506 fail:
507         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
508                 condlog(0, "%s: failed to create new map", mpp->alias);
509                 remove_map(mpp, vecs, 1);
510                 return 1;
511         }
512
513         if (setup_multipath(vecs, mpp))
514                 return 1;
515
516         sync_map_state(mpp);
517
518         if (retries < 0)
519                 condlog(0, "%s: failed reload in new map update", mpp->alias);
520         return 0;
521 }
522
523 static struct multipath *
524 add_map_without_path (struct vectors *vecs, const char *alias)
525 {
526         struct multipath * mpp = alloc_multipath();
527         struct config *conf;
528
529         if (!mpp)
530                 return NULL;
531         if (!alias) {
532                 FREE(mpp);
533                 return NULL;
534         }
535
536         mpp->alias = STRDUP(alias);
537
538         if (dm_get_info(mpp->alias, &mpp->dmi)) {
539                 condlog(3, "%s: cannot access table", mpp->alias);
540                 goto out;
541         }
542         set_multipath_wwid(mpp);
543         conf = get_multipath_config();
544         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
545         put_multipath_config(conf);
546
547         if (update_multipath_table(mpp, vecs->pathvec, 1))
548                 goto out;
549         if (update_multipath_status(mpp))
550                 goto out;
551
552         if (!vector_alloc_slot(vecs->mpvec))
553                 goto out;
554
555         vector_set_slot(vecs->mpvec, mpp);
556
557         if (update_map(mpp, vecs, 1) != 0) /* map removed */
558                 return NULL;
559
560         return mpp;
561 out:
562         remove_map(mpp, vecs, PURGE_VEC);
563         return NULL;
564 }
565
566 static int
567 coalesce_maps(struct vectors *vecs, vector nmpv)
568 {
569         struct multipath * ompp;
570         vector ompv = vecs->mpvec;
571         unsigned int i, reassign_maps;
572         struct config *conf;
573
574         conf = get_multipath_config();
575         reassign_maps = conf->reassign_maps;
576         put_multipath_config(conf);
577         vector_foreach_slot (ompv, ompp, i) {
578                 condlog(3, "%s: coalesce map", ompp->alias);
579                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
580                         /*
581                          * remove all current maps not allowed by the
582                          * current configuration
583                          */
584                         if (dm_flush_map(ompp->alias)) {
585                                 condlog(0, "%s: unable to flush devmap",
586                                         ompp->alias);
587                                 /*
588                                  * may be just because the device is open
589                                  */
590                                 if (setup_multipath(vecs, ompp) != 0) {
591                                         i--;
592                                         continue;
593                                 }
594                                 if (!vector_alloc_slot(nmpv))
595                                         return 1;
596
597                                 vector_set_slot(nmpv, ompp);
598
599                                 vector_del_slot(ompv, i);
600                                 i--;
601                         }
602                         else {
603                                 dm_lib_release();
604                                 condlog(2, "%s devmap removed", ompp->alias);
605                         }
606                 } else if (reassign_maps) {
607                         condlog(3, "%s: Reassign existing device-mapper"
608                                 " devices", ompp->alias);
609                         dm_reassign(ompp->alias);
610                 }
611         }
612         return 0;
613 }
614
615 static void
616 sync_maps_state(vector mpvec)
617 {
618         unsigned int i;
619         struct multipath *mpp;
620
621         vector_foreach_slot (mpvec, mpp, i)
622                 sync_map_state(mpp);
623 }
624
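/*
 * Remove the map from the kernel and from multipathd. With 'nopaths' set,
 * a map whose device is still open may be scheduled for deferred removal
 * instead. Returns 0 when the map was flushed (its paths are then orphaned
 * and the map dropped from mpvec), non-zero otherwise.
 */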
625 static int
626 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
627 {
628         int r;
629
630         if (nopaths)
631                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
632         else
633                 r = dm_flush_map(mpp->alias);
634         /*
635          * the references to this map are cleared further below, so we can
636          * ignore the spurious uevent we may have generated with the
637          * dm_flush_map call above
638          */
638         if (r) {
639                 /*
640                  * May not really be an error -- if the map was already flushed
641                  * from the device mapper by dmsetup(8) for instance.
642                  */
643                 if (r == 1)
644                         condlog(0, "%s: can't flush", mpp->alias);
645                 else {
646                         condlog(2, "%s: devmap deferred remove", mpp->alias);
647                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
648                 }
649                 return r;
650         }
651         else {
652                 dm_lib_release();
653                 condlog(2, "%s: map flushed", mpp->alias);
654         }
655
656         orphan_paths(vecs->pathvec, mpp);
657         remove_map_and_stop_waiter(mpp, vecs);
658
659         return 0;
660 }
661
662 static int
663 uev_add_map (struct uevent * uev, struct vectors * vecs)
664 {
665         char *alias;
666         int major = -1, minor = -1, rc;
667
668         condlog(3, "%s: add map (uevent)", uev->kernel);
669         alias = uevent_get_dm_name(uev);
670         if (!alias) {
671                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
672                 major = uevent_get_major(uev);
673                 minor = uevent_get_minor(uev);
674                 alias = dm_mapname(major, minor);
675                 if (!alias) {
676                         condlog(2, "%s: mapname not found for %d:%d",
677                                 uev->kernel, major, minor);
678                         return 1;
679                 }
680         }
681         pthread_cleanup_push(cleanup_lock, &vecs->lock);
682         lock(&vecs->lock);
683         pthread_testcancel();
684         rc = ev_add_map(uev->kernel, alias, vecs);
685         lock_cleanup_pop(vecs->lock);
686         FREE(alias);
687         return rc;
688 }
689
690 /*
691  * ev_add_map expects that the multipath device already exists in kernel
692  * before it is called. It just adds a device to multipathd or updates an
693  * existing device.
694  */
695 int
696 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
697 {
698         struct multipath * mpp;
699         int delayed_reconfig, reassign_maps;
700         struct config *conf;
701
702         if (!dm_is_mpath(alias)) {
703                 condlog(4, "%s: not a multipath map", alias);
704                 return 0;
705         }
706
707         mpp = find_mp_by_alias(vecs->mpvec, alias);
708
709         if (mpp) {
710                 if (mpp->wait_for_udev > 1) {
711                         condlog(2, "%s: performing delayed actions",
712                                 mpp->alias);
713                         if (update_map(mpp, vecs, 0))
714                                 /* setup multipathd removed the map */
715                                 return 1;
716                 }
717                 conf = get_multipath_config();
718                 delayed_reconfig = conf->delayed_reconfig;
719                 reassign_maps = conf->reassign_maps;
720                 put_multipath_config(conf);
721                 if (mpp->wait_for_udev) {
722                         mpp->wait_for_udev = 0;
723                         if (delayed_reconfig &&
724                             !need_to_delay_reconfig(vecs)) {
725                                 condlog(2, "reconfigure (delayed)");
726                                 set_config_state(DAEMON_CONFIGURE);
727                                 return 0;
728                         }
729                 }
730                 /*
731                  * Not really an error -- we generate our own uevent
732                  * if we create a multipath mapped device as a result
733                  * of uev_add_path
734                  */
735                 if (reassign_maps) {
736                         condlog(3, "%s: Reassign existing device-mapper devices",
737                                 alias);
738                         dm_reassign(alias);
739                 }
740                 return 0;
741         }
742         condlog(2, "%s: adding map", alias);
743
744         /*
745          * now we can register the map
746          */
747         if ((mpp = add_map_without_path(vecs, alias))) {
748                 sync_map_state(mpp);
749                 condlog(2, "%s: devmap %s registered", alias, dev);
750                 return 0;
751         } else {
752                 condlog(2, "%s: ev_add_map failed", dev);
753                 return 1;
754         }
755 }
756
757 static int
758 uev_remove_map (struct uevent * uev, struct vectors * vecs)
759 {
760         char *alias;
761         int minor;
762         struct multipath *mpp;
763
764         condlog(3, "%s: remove map (uevent)", uev->kernel);
765         alias = uevent_get_dm_name(uev);
766         if (!alias) {
767                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
768                 return 0;
769         }
770         minor = uevent_get_minor(uev);
771
772         pthread_cleanup_push(cleanup_lock, &vecs->lock);
773         lock(&vecs->lock);
774         pthread_testcancel();
775         mpp = find_mp_by_minor(vecs->mpvec, minor);
776
777         if (!mpp) {
778                 condlog(2, "%s: devmap not registered, can't remove",
779                         uev->kernel);
780                 goto out;
781         }
782         if (strcmp(mpp->alias, alias)) {
783                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
784                         uev->kernel, mpp->alias, alias);
785                 goto out;
786         }
787
788         orphan_paths(vecs->pathvec, mpp);
789         remove_map_and_stop_waiter(mpp, vecs);
790 out:
791         lock_cleanup_pop(vecs->lock);
792         FREE(alias);
793         return 0;
794 }
795
796 /* Called from CLI handler */
797 int
798 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
799 {
800         struct multipath * mpp;
801
802         mpp = find_mp_by_minor(vecs->mpvec, minor);
803
804         if (!mpp) {
805                 condlog(2, "%s: devmap not registered, can't remove",
806                         devname);
807                 return 1;
808         }
809         if (strcmp(mpp->alias, alias)) {
810                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
811                         mpp->alias, mpp->dmi->minor, minor);
812                 return 1;
813         }
814         return flush_map(mpp, vecs, 0);
815 }
816
817 static int
818 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
819 {
820         struct path *pp;
821         int ret = 0, i;
822         struct config *conf;
823
824         condlog(3, "%s: add path (uevent)", uev->kernel);
825         if (strstr(uev->kernel, "..") != NULL) {
826                 /*
827                  * Don't allow relative device names in the pathvec
828                  */
829                 condlog(0, "%s: path name is invalid", uev->kernel);
830                 return 1;
831         }
832
833         pthread_cleanup_push(cleanup_lock, &vecs->lock);
834         lock(&vecs->lock);
835         pthread_testcancel();
836         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
837         if (pp) {
838                 int r;
839
840                 condlog(3, "%s: spurious uevent, path already in pathvec",
841                         uev->kernel);
842                 if (!pp->mpp && !strlen(pp->wwid)) {
843                         condlog(3, "%s: reinitialize path", uev->kernel);
844                         udev_device_unref(pp->udev);
845                         pp->udev = udev_device_ref(uev->udev);
846                         conf = get_multipath_config();
847                         pthread_cleanup_push(put_multipath_config, conf);
848                         r = pathinfo(pp, conf,
849                                      DI_ALL | DI_BLACKLIST);
850                         pthread_cleanup_pop(1);
851                         if (r == PATHINFO_OK)
852                                 ret = ev_add_path(pp, vecs, need_do_map);
853                         else if (r == PATHINFO_SKIPPED) {
854                                 condlog(3, "%s: remove blacklisted path",
855                                         uev->kernel);
856                                 i = find_slot(vecs->pathvec, (void *)pp);
857                                 if (i != -1)
858                                         vector_del_slot(vecs->pathvec, i);
859                                 free_path(pp);
860                         } else {
861                                 condlog(0, "%s: failed to reinitialize path",
862                                         uev->kernel);
863                                 ret = 1;
864                         }
865                 }
866         }
867         lock_cleanup_pop(vecs->lock);
868         if (pp)
869                 return ret;
870
871         /*
872          * get path vital state
873          */
874         conf = get_multipath_config();
875         pthread_cleanup_push(put_multipath_config, conf);
876         ret = alloc_path_with_pathinfo(conf, uev->udev,
877                                        uev->wwid, DI_ALL, &pp);
878         pthread_cleanup_pop(1);
879         if (!pp) {
880                 if (ret == PATHINFO_SKIPPED)
881                         return 0;
882                 condlog(3, "%s: failed to get path info", uev->kernel);
883                 return 1;
884         }
885         pthread_cleanup_push(cleanup_lock, &vecs->lock);
886         lock(&vecs->lock);
887         pthread_testcancel();
888         ret = store_path(vecs->pathvec, pp);
889         if (!ret) {
890                 conf = get_multipath_config();
891                 pp->checkint = conf->checkint;
892                 put_multipath_config(conf);
893                 ret = ev_add_path(pp, vecs, need_do_map);
894         } else {
895                 condlog(0, "%s: failed to store path info, "
896                         "dropping event",
897                         uev->kernel);
898                 free_path(pp);
899                 ret = 1;
900         }
901         lock_cleanup_pop(vecs->lock);
902         return ret;
903 }
904
905 /*
906  * returns:
907  * 0: added
908  * 1: error
909  */
910 int
911 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
912 {
913         struct multipath * mpp;
914         char params[PARAMS_SIZE] = {0};
915         int retries = 3;
916         int start_waiter = 0;
917         int ret;
918
919         /*
920          * need path UID to go any further
921          */
922         if (strlen(pp->wwid) == 0) {
923                 condlog(0, "%s: failed to get path uid", pp->dev);
924                 goto fail; /* leave path added to pathvec */
925         }
926         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
927         if (mpp && pp->size && mpp->size != pp->size) {
928                 condlog(0, "%s: failed to add new path %s, device size mismatch", mpp->alias, pp->dev);
929                 int i = find_slot(vecs->pathvec, (void *)pp);
930                 if (i != -1)
931                         vector_del_slot(vecs->pathvec, i);
932                 free_path(pp);
933                 return 1;
934         }
935         if (mpp && mpp->wait_for_udev &&
936             (pathcount(mpp, PATH_UP) > 0 ||
937              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
938               mpp->ghost_delay_tick <= 0))) {
939                 /* if wait_for_udev is set and valid paths exist */
940                 condlog(3, "%s: delaying path addition until %s is fully initialized",
941                         pp->dev, mpp->alias);
942                 mpp->wait_for_udev = 2;
943                 orphan_path(pp, "waiting for create to complete");
944                 return 0;
945         }
946
947         pp->mpp = mpp;
948 rescan:
949         if (mpp) {
950                 condlog(4,"%s: adopting all paths for path %s",
951                         mpp->alias, pp->dev);
952                 if (adopt_paths(vecs->pathvec, mpp))
953                         goto fail; /* leave path added to pathvec */
954
955                 verify_paths(mpp, vecs);
956                 mpp->action = ACT_RELOAD;
957         } else {
958                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
959                         orphan_path(pp, "only one path");
960                         return 0;
961                 }
962                 condlog(4,"%s: creating new map", pp->dev);
963                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
964                         mpp->action = ACT_CREATE;
965                         /*
966                          * We don't depend on ACT_CREATE, as domap will
967                          * set it to ACT_NOTHING when complete.
968                          */
969                         start_waiter = 1;
970                 }
971                 if (!start_waiter)
972                         goto fail; /* leave path added to pathvec */
973         }
974
975         /* persistent reservation check */
976         mpath_pr_event_handle(pp);
977
978         if (!need_do_map)
979                 return 0;
980
981         if (!dm_map_present(mpp->alias)) {
982                 mpp->action = ACT_CREATE;
983                 start_waiter = 1;
984         }
985         /*
986          * push the map to the device-mapper
987          */
988         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
989                 condlog(0, "%s: failed to setup map for addition of new "
990                         "path %s", mpp->alias, pp->dev);
991                 goto fail_map;
992         }
993         /*
994          * reload the map for the multipath mapped device
995          */
996 retry:
997         ret = domap(mpp, params, 1);
998         if (ret <= 0) {
999                 if (ret < 0 && retries-- > 0) {
1000                         condlog(0, "%s: retry domap for addition of new "
1001                                 "path %s", mpp->alias, pp->dev);
1002                         sleep(1);
1003                         goto retry;
1004                 }
1005                 condlog(0, "%s: failed in domap for addition of new "
1006                         "path %s", mpp->alias, pp->dev);
1007                 /*
1008                  * deal with asynchronous uevents :((
1009                  */
1010                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1011                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
1012                         sleep(1);
1013                         update_mpp_paths(mpp, vecs->pathvec);
1014                         goto rescan;
1015                 }
1016                 else if (mpp->action == ACT_RELOAD)
1017                         condlog(0, "%s: giving up reload", mpp->alias);
1018                 else
1019                         goto fail_map;
1020         }
1021         dm_lib_release();
1022
1023         if ((mpp->action == ACT_CREATE ||
1024              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1025             wait_for_events(mpp, vecs))
1026                         goto fail_map;
1027
1028         /*
1029          * update our state from kernel regardless of create or reload
1030          */
1031         if (setup_multipath(vecs, mpp))
1032                 goto fail; /* if setup_multipath fails, it removes the map */
1033
1034         sync_map_state(mpp);
1035
1036         if (retries >= 0) {
1037                 condlog(2, "%s [%s]: path added to devmap %s",
1038                         pp->dev, pp->dev_t, mpp->alias);
1039                 return 0;
1040         } else
1041                 goto fail;
1042
1043 fail_map:
1044         remove_map(mpp, vecs, 1);
1045 fail:
1046         orphan_path(pp, "failed to add path");
1047         return 1;
1048 }
1049
1050 static int
1051 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1052 {
1053         struct path *pp;
1054         int ret;
1055
1056         condlog(3, "%s: remove path (uevent)", uev->kernel);
1057         delete_foreign(uev->udev);
1058
1059         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1060         lock(&vecs->lock);
1061         pthread_testcancel();
1062         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1063         if (pp)
1064                 ret = ev_remove_path(pp, vecs, need_do_map);
1065         lock_cleanup_pop(vecs->lock);
1066         if (!pp) {
1067                 /* Not an error; path might have been purged earlier */
1068                 condlog(0, "%s: path already removed", uev->kernel);
1069                 return 0;
1070         }
1071         return ret;
1072 }
1073
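/*
 * Remove a path from its map. If this was the last path, try to flush the
 * whole map (disabling queueing first if flush_on_last_del is enabled);
 * otherwise reload the map without the path. The path is deleted from
 * pathvec and freed unless the map itself had to be removed on failure.
 */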
1074 int
1075 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1076 {
1077         struct multipath * mpp;
1078         int i, retval = 0;
1079         char params[PARAMS_SIZE] = {0};
1080
1081         /*
1082          * avoid referring to the map of an orphaned path
1083          */
1084         if ((mpp = pp->mpp)) {
1085                 /*
1086                  * transform the mp->pg vector of vectors of paths
1087                  * into a mp->params string to feed the device-mapper
1088                  */
1089                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1090                         condlog(0, "%s: failed to update paths",
1091                                 mpp->alias);
1092                         goto fail;
1093                 }
1094
1095                 /*
1096                  * Make sure mpp->hwe doesn't point to freed memory
1097                  * We call extract_hwe_from_path() below to restore mpp->hwe
1098                  */
1099                 if (mpp->hwe == pp->hwe)
1100                         mpp->hwe = NULL;
1101
1102                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1103                         vector_del_slot(mpp->paths, i);
1104
1105                 /*
1106                  * remove the map IF removing the last path
1107                  */
1108                 if (VECTOR_SIZE(mpp->paths) == 0) {
1109                         char alias[WWID_SIZE];
1110
1111                         /*
1112                          * flush_map will fail if the device is open
1113                          */
1114                         strlcpy(alias, mpp->alias, WWID_SIZE);
1115                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1116                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1117                                 mpp->retry_tick = 0;
1118                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1119                                 mpp->disable_queueing = 1;
1120                                 mpp->stat_map_failures++;
1121                                 dm_queue_if_no_path(mpp->alias, 0);
1122                         }
1123                         if (!flush_map(mpp, vecs, 1)) {
1124                                 condlog(2, "%s: removed map after"
1125                                         " removing all paths",
1126                                         alias);
1127                                 retval = 0;
1128                                 goto out;
1129                         }
1130                         /*
1131                          * Not an error, continue
1132                          */
1133                 }
1134
1135                 if (mpp->hwe == NULL)
1136                         extract_hwe_from_path(mpp);
1137
1138                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1139                         condlog(0, "%s: failed to setup map for"
1140                                 " removal of path %s", mpp->alias, pp->dev);
1141                         goto fail;
1142                 }
1143
1144                 if (mpp->wait_for_udev) {
1145                         mpp->wait_for_udev = 2;
1146                         goto out;
1147                 }
1148
1149                 if (!need_do_map)
1150                         goto out;
1151                 /*
1152                  * reload the map
1153                  */
1154                 mpp->action = ACT_RELOAD;
1155                 if (domap(mpp, params, 1) <= 0) {
1156                         condlog(0, "%s: failed in domap for "
1157                                 "removal of path %s",
1158                                 mpp->alias, pp->dev);
1159                         retval = 1;
1160                 } else {
1161                         /*
1162                          * update our state from kernel
1163                          */
1164                         if (setup_multipath(vecs, mpp))
1165                                 return 1;
1166                         sync_map_state(mpp);
1167
1168                         condlog(2, "%s [%s]: path removed from map %s",
1169                                 pp->dev, pp->dev_t, mpp->alias);
1170                 }
1171         }
1172
1173 out:
1174         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1175                 vector_del_slot(vecs->pathvec, i);
1176
1177         free_path(pp);
1178
1179         return retval;
1180
1181 fail:
1182         remove_map_and_stop_waiter(mpp, vecs);
1183         return 1;
1184 }
1185
1186 static int
1187 uev_update_path (struct uevent *uev, struct vectors * vecs)
1188 {
1189         int ro, retval = 0, rc;
1190         struct path * pp;
1191         struct config *conf;
1192         int disable_changed_wwids;
1193         int needs_reinit = 0;
1194
1195         switch ((rc = change_foreign(uev->udev))) {
1196         case FOREIGN_OK:
1197                 /* known foreign path, ignore event */
1198                 return 0;
1199         case FOREIGN_IGNORED:
1200                 break;
1201         case FOREIGN_ERR:
1202                 condlog(3, "%s: error in change_foreign", __func__);
1203                 break;
1204         default:
1205                 condlog(1, "%s: return code %d of change_foreign is unsupported",
1206                         __func__, rc);
1207                 break;
1208         }
1209
1210         conf = get_multipath_config();
1211         disable_changed_wwids = conf->disable_changed_wwids;
1212         put_multipath_config(conf);
1213
1214         ro = uevent_get_disk_ro(uev);
1215
1216         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1217         lock(&vecs->lock);
1218         pthread_testcancel();
1219
1220         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1221         if (pp) {
1222                 struct multipath *mpp = pp->mpp;
1223                 char wwid[WWID_SIZE];
1224
1225                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1226                         needs_reinit = 1;
1227                         goto out;
1228                 }
1229                 /* Don't deal with other types of failed initialization
1230                  * now. check_path will handle it */
1231                 if (!strlen(pp->wwid))
1232                         goto out;
1233
1234                 strcpy(wwid, pp->wwid);
1235                 get_uid(pp, pp->state, uev->udev);
1236
1237                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1238                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1239                                 uev->kernel, wwid, pp->wwid,
1240                                 (disable_changed_wwids ? "disallowing" :
1241                                  "continuing"));
1242                         strcpy(pp->wwid, wwid);
1243                         if (disable_changed_wwids) {
1244                                 if (!pp->wwid_changed) {
1245                                         pp->wwid_changed = 1;
1246                                         pp->tick = 1;
1247                                         if (pp->mpp)
1248                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1249                                 }
1250                                 goto out;
1251                         }
1252                 } else {
1253                         pp->wwid_changed = 0;
1254                         udev_device_unref(pp->udev);
1255                         pp->udev = udev_device_ref(uev->udev);
1256                         conf = get_multipath_config();
1257                         pthread_cleanup_push(put_multipath_config, conf);
1258                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1259                                 condlog(1, "%s: pathinfo failed after change uevent",
1260                                         uev->kernel);
1261                         pthread_cleanup_pop(1);
1262                 }
1263
1264                 if (mpp && ro >= 0) {
1265                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1266
1267                         if (mpp->wait_for_udev)
1268                                 mpp->wait_for_udev = 2;
1269                         else {
1270                                 if (ro == 1)
1271                                         pp->mpp->force_readonly = 1;
1272                                 retval = reload_map(vecs, mpp, 0, 1);
1273                                 pp->mpp->force_readonly = 0;
1274                                 condlog(2, "%s: map %s reloaded (retval %d)",
1275                                         uev->kernel, mpp->alias, retval);
1276                         }
1277                 }
1278         }
1279 out:
1280         lock_cleanup_pop(vecs->lock);
1281         if (!pp) {
1282                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1283                 if (uev->udev) {
1284                         int flag = DI_SYSFS | DI_WWID;
1285
1286                         conf = get_multipath_config();
1287                         pthread_cleanup_push(put_multipath_config, conf);
1288                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1289                         pthread_cleanup_pop(1);
1290
1291                         if (retval == PATHINFO_SKIPPED) {
1292                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1293                                 return 0;
1294                         }
1295                 }
1296
1297                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1298         }
1299         if (needs_reinit)
1300                 retval = uev_add_path(uev, vecs, 1);
1301         return retval;
1302 }
1303
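/*
 * For device-mapper "change" uevents carrying a PATH_FAILED action, look up
 * the failed path by its DM_PATH devt and hand it to the io_err_stat code,
 * which accounts path IO errors (used for marginal path detection).
 * Returns 0 if the event was consumed.
 */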
1304 static int
1305 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1306 {
1307         char *action = NULL, *devt = NULL;
1308         struct path *pp;
1309         int r = 1;
1310
1311         action = uevent_get_dm_action(uev);
1312         if (!action)
1313                 return 1;
1314         if (strncmp(action, "PATH_FAILED", 11))
1315                 goto out;
1316         devt = uevent_get_dm_path(uev);
1317         if (!devt) {
1318                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1319                 goto out;
1320         }
1321
1322         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1323         lock(&vecs->lock);
1324         pthread_testcancel();
1325         pp = find_path_by_devt(vecs->pathvec, devt);
1326         if (!pp)
1327                 goto out_lock;
1328         r = io_err_stat_handle_pathfail(pp);
1329         if (r)
1330                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1331                                 pp->dev);
1332 out_lock:
1333         lock_cleanup_pop(vecs->lock);
1334         FREE(devt);
1335         FREE(action);
1336         return r;
1337 out:
1338         FREE(action);
1339         return 1;
1340 }
1341
1342 static int
1343 map_discovery (struct vectors * vecs)
1344 {
1345         struct multipath * mpp;
1346         unsigned int i;
1347
1348         if (dm_get_maps(vecs->mpvec))
1349                 return 1;
1350
1351         vector_foreach_slot (vecs->mpvec, mpp, i)
1352                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1353                     update_multipath_status(mpp)) {
1354                         remove_map(mpp, vecs, 1);
1355                         i--;
1356                 }
1357
1358         return 0;
1359 }
1360
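/*
 * Callback invoked by the unix-socket listener for each CLI request.
 * Non-root clients are restricted to "list"/"show" commands. The reply is
 * a freshly allocated string returned via *reply/*len; the return value is
 * non-zero on failure.
 */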
1361 int
1362 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1363                 void * trigger_data)
1364 {
1365         struct vectors * vecs;
1366         int r;
1367
1368         *reply = NULL;
1369         *len = 0;
1370         vecs = (struct vectors *)trigger_data;
1371
1372         if ((str != NULL) && (is_root == false) &&
1373             (strncmp(str, "list", strlen("list")) != 0) &&
1374             (strncmp(str, "show", strlen("show")) != 0)) {
1375                 *reply = STRDUP("permission denied: need to be root");
1376                 if (*reply)
1377                         *len = strlen(*reply) + 1;
1378                 return 1;
1379         }
1380
1381         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1382
1383         if (r > 0) {
1384                 if (r == ETIMEDOUT)
1385                         *reply = STRDUP("timeout\n");
1386                 else
1387                         *reply = STRDUP("fail\n");
1388                 if (*reply)
1389                         *len = strlen(*reply) + 1;
1390                 r = 1;
1391         }
1392         else if (!r && *len == 0) {
1393                 *reply = STRDUP("ok\n");
1394                 if (*reply)
1395                         *len = strlen(*reply) + 1;
1396                 r = 0;
1397         }
1398         /* else if (r < 0) leave *reply alone */
1399
1400         return r;
1401 }
1402
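/*
 * Uevent dispatcher, called from the uevent service thread. It waits until
 * the daemon is idle or running, then handles dm-* map events (change /
 * remove) separately from block device path add/remove/change events; any
 * uevents merged into this one are processed first with need_do_map
 * disabled.
 */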
1403 int
1404 uev_trigger (struct uevent * uev, void * trigger_data)
1405 {
1406         int r = 0;
1407         struct vectors * vecs;
1408         struct uevent *merge_uev, *tmp;
1409
1410         vecs = (struct vectors *)trigger_data;
1411
1412         pthread_cleanup_push(config_cleanup, NULL);
1413         pthread_mutex_lock(&config_lock);
1414         if (running_state != DAEMON_IDLE &&
1415             running_state != DAEMON_RUNNING)
1416                 pthread_cond_wait(&config_cond, &config_lock);
1417         pthread_cleanup_pop(1);
1418
1419         if (running_state == DAEMON_SHUTDOWN)
1420                 return 0;
1421
1422         /*
1423          * device map event
1424          * Add events are ignored here as the tables
1425          * are not fully initialised then.
1426          */
1427         if (!strncmp(uev->kernel, "dm-", 3)) {
1428                 if (!uevent_is_mpath(uev)) {
1429                         if (!strncmp(uev->action, "change", 6))
1430                                 (void)add_foreign(uev->udev);
1431                         else if (!strncmp(uev->action, "remove", 6))
1432                                 (void)delete_foreign(uev->udev);
1433                         goto out;
1434                 }
1435                 if (!strncmp(uev->action, "change", 6)) {
1436                         r = uev_add_map(uev, vecs);
1437
1438                         /*
1439                          * the kernel-side dm-mpath issues a PATH_FAILED
1440                          * event when it encounters a path IO error. This
1441                          * is a reasonable entry point for the path IO
1442                          * error accounting process.
1443                          */
1444                         uev_pathfail_check(uev, vecs);
1445                 } else if (!strncmp(uev->action, "remove", 6)) {
1446                         r = uev_remove_map(uev, vecs);
1447                 }
1448                 goto out;
1449         }
1450
1451         /*
1452          * path add/remove/change event, add/remove maybe merged
1453          */
1454         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1455                 if (!strncmp(merge_uev->action, "add", 3))
1456                         r += uev_add_path(merge_uev, vecs, 0);
1457                 if (!strncmp(merge_uev->action, "remove", 6))
1458                         r += uev_remove_path(merge_uev, vecs, 0);
1459         }
1460
1461         if (!strncmp(uev->action, "add", 3))
1462                 r += uev_add_path(uev, vecs, 1);
1463         if (!strncmp(uev->action, "remove", 6))
1464                 r += uev_remove_path(uev, vecs, 1);
1465         if (!strncmp(uev->action, "change", 6))
1466                 r += uev_update_path(uev, vecs);
1467
1468 out:
1469         return r;
1470 }
1471
1472 static void rcu_unregister(void *param)
1473 {
1474         rcu_unregister_thread();
1475 }
1476
1477 static void *
1478 ueventloop (void * ap)
1479 {
1480         struct udev *udev = ap;
1481
1482         pthread_cleanup_push(rcu_unregister, NULL);
1483         rcu_register_thread();
1484         if (uevent_listen(udev))
1485                 condlog(0, "error starting uevent listener");
1486         pthread_cleanup_pop(1);
1487         return NULL;
1488 }
1489
1490 static void *
1491 uevqloop (void * ap)
1492 {
1493         pthread_cleanup_push(rcu_unregister, NULL);
1494         rcu_register_thread();
1495         if (uevent_dispatch(&uev_trigger, ap))
1496                 condlog(0, "error starting uevent dispatcher");
1497         pthread_cleanup_pop(1);
1498         return NULL;
1499 }
1500 static void *
1501 uxlsnrloop (void * ap)
1502 {
1503         long ux_sock;
1504
1505         pthread_cleanup_push(rcu_unregister, NULL);
1506         rcu_register_thread();
1507
1508         ux_sock = ux_socket_listen(DEFAULT_SOCKET);
1509         if (ux_sock == -1) {
1510                 condlog(1, "could not create uxsock: %d", errno);
1511                 exit_daemon();
1512                 goto out;
1513         }
1514         pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);
1515
1516         if (cli_init()) {
1517                 condlog(1, "Failed to init uxsock listener");
1518                 exit_daemon();
1519                 goto out_sock;
1520         }
1521
1522         /* Tell main thread that thread has started */
1523         post_config_state(DAEMON_CONFIGURE);
1524
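        /*
         * Register CLI command handlers. Handlers registered via
         * set_unlocked_handler_callback() run without taking the vecs lock
         * (e.g. "list daemon", "reconfigure", "quit", "shutdown").
         */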
1525         set_handler_callback(LIST+PATHS, cli_list_paths);
1526         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1527         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1528         set_handler_callback(LIST+PATH, cli_list_path);
1529         set_handler_callback(LIST+MAPS, cli_list_maps);
1530         set_handler_callback(LIST+STATUS, cli_list_status);
1531         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1532         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1533         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1534         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1535         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1536         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1537         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1538         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1539         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1540         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1541         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1542         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1543         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1544         set_handler_callback(LIST+CONFIG, cli_list_config);
1545         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1546         set_handler_callback(LIST+DEVICES, cli_list_devices);
1547         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1548         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1549         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1550         set_handler_callback(ADD+PATH, cli_add_path);
1551         set_handler_callback(DEL+PATH, cli_del_path);
1552         set_handler_callback(ADD+MAP, cli_add_map);
1553         set_handler_callback(DEL+MAP, cli_del_map);
1554         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1555         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1556         set_handler_callback(SUSPEND+MAP, cli_suspend);
1557         set_handler_callback(RESUME+MAP, cli_resume);
1558         set_handler_callback(RESIZE+MAP, cli_resize);
1559         set_handler_callback(RELOAD+MAP, cli_reload);
1560         set_handler_callback(RESET+MAP, cli_reassign);
1561         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1562         set_handler_callback(FAIL+PATH, cli_fail);
1563         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1564         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1565         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1566         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1567         set_unlocked_handler_callback(QUIT, cli_quit);
1568         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1569         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1570         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1571         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1572         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1573         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1574         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1575         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1576         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1577
1578         umask(077);
1579         uxsock_listen(&uxsock_trigger, ux_sock, ap);
1580
1581 out_sock:
1582         pthread_cleanup_pop(1); /* uxsock_cleanup */
1583 out:
1584         pthread_cleanup_pop(1); /* rcu_unregister */
1585         return NULL;
1586 }
1587
1588 void
1589 exit_daemon (void)
1590 {
1591         post_config_state(DAEMON_SHUTDOWN);
1592 }
1593
1594 static void
1595 fail_path (struct path * pp, int del_active)
1596 {
1597         if (!pp->mpp)
1598                 return;
1599
1600         condlog(2, "checker failed path %s in map %s",
1601                  pp->dev_t, pp->mpp->alias);
1602
1603         dm_fail_path(pp->mpp->alias, pp->dev_t);
1604         if (del_active)
1605                 update_queue_mode_del_path(pp->mpp);
1606 }
1607
1608 /*
1609  * the caller must have locked the path list before calling this function
1610  */
1611 static int
1612 reinstate_path (struct path * pp, int add_active)
1613 {
1614         int ret = 0;
1615
1616         if (!pp->mpp)
1617                 return 0;
1618
1619         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1620                 condlog(0, "%s: reinstate failed", pp->dev_t);
1621                 ret = 1;
1622         } else {
1623                 condlog(2, "%s: reinstated", pp->dev_t);
1624                 if (add_active)
1625                         update_queue_mode_add_path(pp->mpp);
1626         }
1627         return ret;
1628 }
1629
1630 static void
1631 enable_group(struct path * pp)
1632 {
1633         struct pathgroup * pgp;
1634
1635         /*
1636          * If the path was added through uev_add_path, pgindex can be
1637          * unset; the next update_strings() call will set it upon map reload.
1638          *
1639          * We can safely return here, because upon map reload all
1640          * path groups will be enabled.
1641          */
1642         if (!pp->mpp->pg || !pp->pgindex)
1643                 return;
1644
1645         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1646
1647         if (pgp->status == PGSTATE_DISABLED) {
1648                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1649                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1650         }
1651 }
1652
1653 static void
1654 mpvec_garbage_collector (struct vectors * vecs)
1655 {
1656         struct multipath * mpp;
1657         unsigned int i;
1658
1659         if (!vecs->mpvec)
1660                 return;
1661
1662         vector_foreach_slot (vecs->mpvec, mpp, i) {
1663                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1664                         condlog(2, "%s: remove dead map", mpp->alias);
1665                         remove_map_and_stop_waiter(mpp, vecs);
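                        /* the map was also removed from vecs->mpvec, so stay on this slot */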
1666                         i--;
1667                 }
1668         }
1669 }
1670
1671 /* This is called after a path has started working again. If the multipath
1672  * device for this path uses the followover failback type, and this is the
1673  * best pathgroup, and this is the first path in the pathgroup to come back
1674  * up, then switch to this pathgroup. */
1675 static int
1676 followover_should_failback(struct path * pp)
1677 {
1678         struct pathgroup * pgp;
1679         struct path *pp1;
1680         int i;
1681
1682         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1683             !pp->mpp->pg || !pp->pgindex ||
1684             pp->pgindex != pp->mpp->bestpg)
1685                 return 0;
1686
1687         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1688         vector_foreach_slot(pgp->paths, pp1, i) {
1689                 if (pp1 == pp)
1690                         continue;
1691                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1692                         return 0;
1693         }
1694         return 1;
1695 }
1696
1697 static void
1698 missing_uev_wait_tick(struct vectors *vecs)
1699 {
1700         struct multipath * mpp;
1701         unsigned int i;
1702         int timed_out = 0, delayed_reconfig;
1703         struct config *conf;
1704
1705         vector_foreach_slot (vecs->mpvec, mpp, i) {
1706                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1707                         timed_out = 1;
1708                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1709                         if (mpp->wait_for_udev > 1 &&
1710                             update_map(mpp, vecs, 0)) {
1711                                 /* update_map removed map */
1712                                 i--;
1713                                 continue;
1714                         }
1715                         mpp->wait_for_udev = 0;
1716                 }
1717         }
1718
1719         conf = get_multipath_config();
1720         delayed_reconfig = conf->delayed_reconfig;
1721         put_multipath_config(conf);
1722         if (timed_out && delayed_reconfig &&
1723             !need_to_delay_reconfig(vecs)) {
1724                 condlog(2, "reconfigure (delayed)");
1725                 set_config_state(DAEMON_CONFIGURE);
1726         }
1727 }
1728
1729 static void
1730 ghost_delay_tick(struct vectors *vecs)
1731 {
1732         struct multipath * mpp;
1733         unsigned int i;
1734
1735         vector_foreach_slot (vecs->mpvec, mpp, i) {
1736                 if (mpp->ghost_delay_tick <= 0)
1737                         continue;
1738                 if (--mpp->ghost_delay_tick <= 0) {
1739                         condlog(0, "%s: timed out waiting for active path",
1740                                 mpp->alias);
1741                         mpp->force_udev_reload = 1;
1742                         if (update_map(mpp, vecs, 0) != 0) {
1743                                 /* update_map removed map */
1744                                 i--;
1745                                 continue;
1746                         }
1747                 }
1748         }
1749 }
1750
1751 static void
1752 defered_failback_tick (vector mpvec)
1753 {
1754         struct multipath * mpp;
1755         unsigned int i;
1756
1757         vector_foreach_slot (mpvec, mpp, i) {
1758                 /*
1759                  * count down the deferred failback timer
1760                  */
1761                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1762                         mpp->failback_tick--;
1763
1764                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1765                                 switch_pathgroup(mpp);
1766                 }
1767         }
1768 }
1769
1770 static void
1771 retry_count_tick(vector mpvec)
1772 {
1773         struct multipath *mpp;
1774         unsigned int i;
1775
1776         vector_foreach_slot (mpvec, mpp, i) {
1777                 if (mpp->retry_tick > 0) {
1778                         mpp->stat_total_queueing_time++;
1779                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1780                         if(--mpp->retry_tick == 0) {
1781                                 mpp->stat_map_failures++;
1782                                 dm_queue_if_no_path(mpp->alias, 0);
1783                                 condlog(2, "%s: Disable queueing", mpp->alias);
1784                         }
1785                 }
1786         }
1787 }
1788
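/*
 * Re-read path priorities: with refresh_all set, refresh every path in
 * the map; otherwise only *pp. Returns 1 if any priority changed.
 */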
1789 int update_prio(struct path *pp, int refresh_all)
1790 {
1791         int oldpriority;
1792         struct path *pp1;
1793         struct pathgroup * pgp;
1794         int i, j, changed = 0;
1795         struct config *conf;
1796
1797         if (refresh_all) {
1798                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1799                         vector_foreach_slot (pgp->paths, pp1, j) {
1800                                 oldpriority = pp1->priority;
1801                                 conf = get_multipath_config();
1802                                 pthread_cleanup_push(put_multipath_config,
1803                                                      conf);
1804                                 pathinfo(pp1, conf, DI_PRIO);
1805                                 pthread_cleanup_pop(1);
1806                                 if (pp1->priority != oldpriority)
1807                                         changed = 1;
1808                         }
1809                 }
1810                 return changed;
1811         }
1812         oldpriority = pp->priority;
1813         conf = get_multipath_config();
1814         pthread_cleanup_push(put_multipath_config, conf);
1815         if (pp->state != PATH_DOWN)
1816                 pathinfo(pp, conf, DI_PRIO);
1817         pthread_cleanup_pop(1);
1818
1819         if (pp->priority == oldpriority)
1820                 return 0;
1821         return 1;
1822 }
1823
1824 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1825 {
1826         if (reload_map(vecs, mpp, refresh, 1))
1827                 return 1;
1828
1829         dm_lib_release();
1830         if (setup_multipath(vecs, mpp) != 0)
1831                 return 1;
1832         sync_map_state(mpp);
1833
1834         return 0;
1835 }
1836
1837 /*
1838  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1839  * and '0' otherwise
1840  */
1841 int
1842 check_path (struct vectors * vecs, struct path * pp, int ticks)
1843 {
1844         int newstate;
1845         int new_path_up = 0;
1846         int chkr_new_path_up = 0;
1847         int add_active;
1848         int disable_reinstate = 0;
1849         int oldchkrstate = pp->chkrstate;
1850         int retrigger_tries, checkint, max_checkint, verbosity;
1851         struct config *conf;
1852         int ret;
1853
1854         if ((pp->initialized == INIT_OK ||
1855              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1856                 return 0;
1857
1858         if (pp->tick)
1859                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1860         if (pp->tick)
1861                 return 0; /* don't check this path yet */
1862
1863         conf = get_multipath_config();
1864         retrigger_tries = conf->retrigger_tries;
1865         checkint = conf->checkint;
1866         max_checkint = conf->max_checkint;
1867         verbosity = conf->verbosity;
1868         put_multipath_config(conf);
1869
1870         if (pp->checkint == CHECKINT_UNDEF) {
1871                 condlog(0, "%s: BUG: checkint is not set", pp->dev);
1872                 pp->checkint = checkint;
1873         }
1874
1875         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1876                 if (pp->retriggers < retrigger_tries) {
1877                         condlog(2, "%s: triggering change event to reinitialize",
1878                                 pp->dev);
1879                         pp->initialized = INIT_REQUESTED_UDEV;
1880                         pp->retriggers++;
1881                         sysfs_attr_set_value(pp->udev, "uevent", "change",
1882                                              strlen("change"));
1883                         return 0;
1884                 } else {
1885                         condlog(1, "%s: not initialized after %d udev retriggers",
1886                                 pp->dev, retrigger_tries);
1887                         /*
1888                          * Make sure that the "add missing path" code path
1889                          * below may reinstate the path later, if it ever
1890                          * comes up again.
1891                          * The WWID need not be cleared; if it was set, the
1892                          * state hadn't been INIT_MISSING_UDEV in the first
1893                          * place.
1894                          */
1895                         pp->initialized = INIT_FAILED;
1896                         return 0;
1897                 }
1898         }
1899
1900         /*
1901          * schedule the next check as soon as possible,
1902          * in case we exit abnormally from here
1903          */
1904         pp->tick = checkint;
1905
1906         newstate = path_offline(pp);
1907         /*
1908          * Wait for uevent for removed paths;
1909          * some LLDDs like zfcp keep paths unavailable
1910          * without sending uevents.
1911          */
1912         if (newstate == PATH_REMOVED)
1913                 newstate = PATH_DOWN;
1914
1915         if (newstate == PATH_UP) {
1916                 conf = get_multipath_config();
1917                 pthread_cleanup_push(put_multipath_config, conf);
1918                 newstate = get_state(pp, conf, 1, newstate);
1919                 pthread_cleanup_pop(1);
1920         } else
1921                 checker_clear_message(&pp->checker);
1922
1923         if (pp->wwid_changed) {
1924                 condlog(2, "%s: path wwid has changed. Refusing to use",
1925                         pp->dev);
1926                 newstate = PATH_DOWN;
1927         }
1928
1929         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1930                 condlog(2, "%s: unusable path - checker failed", pp->dev);
1931                 LOG_MSG(2, verbosity, pp);
1932                 conf = get_multipath_config();
1933                 pthread_cleanup_push(put_multipath_config, conf);
1934                 pathinfo(pp, conf, 0);
1935                 pthread_cleanup_pop(1);
1936                 return 1;
1937         }
1938         if (!pp->mpp) {
1939                 if (!strlen(pp->wwid) && pp->initialized == INIT_FAILED &&
1940                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1941                         condlog(2, "%s: add missing path", pp->dev);
1942                         conf = get_multipath_config();
1943                         pthread_cleanup_push(put_multipath_config, conf);
1944                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1945                         pthread_cleanup_pop(1);
1946                         /* INIT_OK implies ret == PATHINFO_OK */
1947                         if (pp->initialized == INIT_OK) {
1948                                 ev_add_path(pp, vecs, 1);
1949                                 pp->tick = 1;
1950                         } else {
1951                                 /*
1952                                  * We failed multiple times to initialize this
1953                                  * path properly. Don't re-check too often.
1954                                  */
1955                                 pp->checkint = max_checkint;
1956                                 if (ret == PATHINFO_SKIPPED)
1957                                         return -1;
1958                         }
1959                 }
1960                 return 0;
1961         }
1962         /*
1963          * Async IO in flight. Keep the previous path state
1964          * and reschedule as soon as possible
1965          */
1966         if (newstate == PATH_PENDING) {
1967                 pp->tick = 1;
1968                 return 0;
1969         }
1970         /*
1971          * Synchronize with kernel state
1972          */
1973         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1974                 condlog(1, "%s: Could not synchronize with kernel state",
1975                         pp->dev);
1976                 pp->dmstate = PSTATE_UNDEF;
1977         }
1978         /* if update_multipath_strings orphaned the path, quit early */
1979         if (!pp->mpp)
1980                 return 0;
1981
1982         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1983                 pp->state = PATH_SHAKY;
1984                 /*
1985                  * reschedule as soon as possible, so that this path can
1986                  * be recovered in time
1987                  */
1988                 pp->tick = 1;
1989                 return 1;
1990         }
1991
1992         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1993              pp->wait_checks > 0) {
1994                 if (pp->mpp->nr_active > 0) {
1995                         pp->state = PATH_DELAYED;
1996                         pp->wait_checks--;
1997                         return 1;
1998                 } else
1999                         pp->wait_checks = 0;
2000         }
2001
2002         /*
2003          * Don't reinstate a failed path if it's in stand-by
2004          * and the target supports only implicit TPGS mode.
2005          * This prevents unnecessary I/O by DM on stand-by
2006          * paths when there are no other active paths in the map.
2007          */
2008         disable_reinstate = (newstate == PATH_GHOST &&
2009                             pp->mpp->nr_active == 0 &&
2010                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
2011
2012         pp->chkrstate = newstate;
2013         if (newstate != pp->state) {
2014                 int oldstate = pp->state;
2015                 pp->state = newstate;
2016
2017                 LOG_MSG(1, verbosity, pp);
2018
2019                 /*
2020                  * upon state change, reset the checkint
2021                  * to the shortest delay
2022                  */
2023                 conf = get_multipath_config();
2024                 pp->checkint = conf->checkint;
2025                 put_multipath_config(conf);
2026
2027                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2028                         /*
2029                          * proactively fail path in the DM
2030                          */
2031                         if (oldstate == PATH_UP ||
2032                             oldstate == PATH_GHOST) {
2033                                 fail_path(pp, 1);
2034                                 if (pp->mpp->delay_wait_checks > 0 &&
2035                                     pp->watch_checks > 0) {
2036                                         pp->wait_checks = pp->mpp->delay_wait_checks;
2037                                         pp->watch_checks = 0;
2038                                 }
2039                         } else {
2040                                 fail_path(pp, 0);
2041                                 if (pp->wait_checks > 0)
2042                                         pp->wait_checks =
2043                                                 pp->mpp->delay_wait_checks;
2044                         }
2045
2046                         /*
2047                          * cancel scheduled failback
2048                          */
2049                         pp->mpp->failback_tick = 0;
2050
2051                         pp->mpp->stat_path_failures++;
2052                         return 1;
2053                 }
2054
2055                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2056                         if (pp->mpp->prflag) {
2057                                 /*
2058                                  * Check Persistent Reservation.
2059                                  */
2060                                 condlog(2, "%s: checking persistent "
2061                                         "reservation registration", pp->dev);
2062                                 mpath_pr_event_handle(pp);
2063                         }
2064                 }
2065
2066                 /*
2067                  * reinstate this path
2068                  */
2069                 if (oldstate != PATH_UP &&
2070                     oldstate != PATH_GHOST) {
2071                         if (pp->mpp->delay_watch_checks > 0)
2072                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2073                         add_active = 1;
2074                 } else {
2075                         if (pp->watch_checks > 0)
2076                                 pp->watch_checks--;
2077                         add_active = 0;
2078                 }
2079                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2080                         condlog(3, "%s: reload map", pp->dev);
2081                         ev_add_path(pp, vecs, 1);
2082                         pp->tick = 1;
2083                         return 0;
2084                 }
2085                 new_path_up = 1;
2086
2087                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2088                         chkr_new_path_up = 1;
2089
2090                 /*
2091                  * if at least one path is up in a group, and
2092                  * the group is disabled, re-enable it
2093                  */
2094                 if (newstate == PATH_UP)
2095                         enable_group(pp);
2096         }
2097         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2098                 if ((pp->dmstate == PSTATE_FAILED ||
2099                     pp->dmstate == PSTATE_UNDEF) &&
2100                     !disable_reinstate) {
2101                         /* Clear IO errors */
2102                         if (reinstate_path(pp, 0)) {
2103                                 condlog(3, "%s: reload map", pp->dev);
2104                                 ev_add_path(pp, vecs, 1);
2105                                 pp->tick = 1;
2106                                 return 0;
2107                         }
2108                 } else {
2109                         LOG_MSG(4, verbosity, pp);
2110                         if (pp->checkint != max_checkint) {
2111                                 /*
2112                                  * double the next check delay.
2113                                  * max at conf->max_checkint
2114                                  */
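                                /* e.g. 5s -> 10s -> 20s with checkint 5 and max_checkint 20 */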
2115                                 if (pp->checkint < (max_checkint / 2))
2116                                         pp->checkint = 2 * pp->checkint;
2117                                 else
2118                                         pp->checkint = max_checkint;
2119
2120                                 condlog(4, "%s: delay next check %is",
2121                                         pp->dev_t, pp->checkint);
2122                         }
2123                         if (pp->watch_checks > 0)
2124                                 pp->watch_checks--;
2125                         pp->tick = pp->checkint;
2126                 }
2127         }
2128         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2129                 if (pp->dmstate == PSTATE_ACTIVE ||
2130                     pp->dmstate == PSTATE_UNDEF)
2131                         fail_path(pp, 0);
2132                 if (newstate == PATH_DOWN) {
2133                         int log_checker_err;
2134
2135                         conf = get_multipath_config();
2136                         log_checker_err = conf->log_checker_err;
2137                         put_multipath_config(conf);
2138                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2139                                 LOG_MSG(3, verbosity, pp);
2140                         else
2141                                 LOG_MSG(2, verbosity, pp);
2142                 }
2143         }
2144
2145         pp->state = newstate;
2146
2147         if (pp->mpp->wait_for_udev)
2148                 return 1;
2149         /*
2150          * path prio refreshing
2151          */
2152         condlog(4, "path prio refresh");
2153
2154         if (update_prio(pp, new_path_up) &&
2155             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2156              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2157                 update_path_groups(pp->mpp, vecs, !new_path_up);
2158         else if (need_switch_pathgroup(pp->mpp, 0)) {
2159                 if (pp->mpp->pgfailback > 0 &&
2160                     (new_path_up || pp->mpp->failback_tick <= 0))
2161                         pp->mpp->failback_tick =
2162                                 pp->mpp->pgfailback + 1;
2163                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2164                          (chkr_new_path_up && followover_should_failback(pp)))
2165                         switch_pathgroup(pp->mpp);
2166         }
2167         return 1;
2168 }
2169
2170 static void *
2171 checkerloop (void *ap)
2172 {
2173         struct vectors *vecs;
2174         struct path *pp;
2175         int count = 0;
2176         unsigned int i;
2177         struct timespec last_time;
2178         struct config *conf;
2179
2180         pthread_cleanup_push(rcu_unregister, NULL);
2181         rcu_register_thread();
2182         mlockall(MCL_CURRENT | MCL_FUTURE);
2183         vecs = (struct vectors *)ap;
2184         condlog(2, "path checkers start up");
2185
2186         /* Tweak start time for initial path check */
2187         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2188                 last_time.tv_sec = 0;
2189         else
2190                 last_time.tv_sec -= 1;
2191
2192         while (1) {
2193                 struct timespec diff_time, start_time, end_time;
2194                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2195
2196                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2197                         start_time.tv_sec = 0;
2198                 if (start_time.tv_sec && last_time.tv_sec) {
2199                         timespecsub(&start_time, &last_time, &diff_time);
2200                         condlog(4, "tick (%lu.%06lu secs)",
2201                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2202                         last_time = start_time;
2203                         ticks = diff_time.tv_sec;
2204                 } else {
2205                         ticks = 1;
2206                         condlog(4, "tick (%d ticks)", ticks);
2207                 }
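                /*
                 * 'ticks' is the number of whole seconds since the previous
                 * iteration; check_path() uses it to age each path's timer.
                 */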
2208 #ifdef USE_SYSTEMD
2209                 if (use_watchdog)
2210                         sd_notify(0, "WATCHDOG=1");
2211 #endif
2212                 rc = set_config_state(DAEMON_RUNNING);
2213                 if (rc == ETIMEDOUT) {
2214                         condlog(4, "timeout waiting for DAEMON_IDLE");
2215                         continue;
2216                 } else if (rc == EINVAL)
2217                         /* daemon shutdown */
2218                         break;
2219
2220                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2221                 lock(&vecs->lock);
2222                 pthread_testcancel();
2223                 vector_foreach_slot (vecs->pathvec, pp, i) {
2224                         rc = check_path(vecs, pp, ticks);
2225                         if (rc < 0) {
2226                                 vector_del_slot(vecs->pathvec, i);
2227                                 free_path(pp);
2228                                 i--;
2229                         } else
2230                                 num_paths += rc;
2231                 }
2232                 lock_cleanup_pop(vecs->lock);
2233
2234                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2235                 lock(&vecs->lock);
2236                 pthread_testcancel();
2237                 defered_failback_tick(vecs->mpvec);
2238                 retry_count_tick(vecs->mpvec);
2239                 missing_uev_wait_tick(vecs);
2240                 ghost_delay_tick(vecs);
2241                 lock_cleanup_pop(vecs->lock);
2242
2243                 if (count)
2244                         count--;
2245                 else {
2246                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2247                         lock(&vecs->lock);
2248                         pthread_testcancel();
2249                         condlog(4, "map garbage collection");
2250                         mpvec_garbage_collector(vecs);
2251                         count = MAPGCINT;
2252                         lock_cleanup_pop(vecs->lock);
2253                 }
2254
2255                 diff_time.tv_nsec = 0;
2256                 if (start_time.tv_sec &&
2257                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2258                         timespecsub(&end_time, &start_time, &diff_time);
2259                         if (num_paths) {
2260                                 unsigned int max_checkint;
2261
2262                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2263                                         num_paths, num_paths > 1 ? "s" : "",
2264                                         diff_time.tv_sec,
2265                                         diff_time.tv_nsec / 1000);
2266                                 conf = get_multipath_config();
2267                                 max_checkint = conf->max_checkint;
2268                                 put_multipath_config(conf);
2269                                 if (diff_time.tv_sec > max_checkint)
2270                                         condlog(1, "path checkers took longer "
2271                                                 "than %lu seconds, consider "
2272                                                 "increasing max_polling_interval",
2273                                                 diff_time.tv_sec);
2274                         }
2275                 }
2276                 check_foreign();
2277                 post_config_state(DAEMON_IDLE);
2278                 conf = get_multipath_config();
2279                 strict_timing = conf->strict_timing;
2280                 put_multipath_config(conf);
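                /*
                 * Without strict_timing, simply sleep for one second; with it,
                 * sleep only for the remainder of the current second so that
                 * checker iterations stay aligned to a 1-second cadence.
                 */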
2281                 if (!strict_timing)
2282                         sleep(1);
2283                 else {
2284                         if (diff_time.tv_nsec) {
2285                                 diff_time.tv_sec = 0;
2286                                 diff_time.tv_nsec =
2287                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2288                         } else
2289                                 diff_time.tv_sec = 1;
2290
2291                         condlog(3, "waiting for %lu.%06lu secs",
2292                                 diff_time.tv_sec,
2293                                 diff_time.tv_nsec / 1000);
2294                         if (nanosleep(&diff_time, NULL) != 0) {
2295                                 condlog(3, "nanosleep failed with error %d",
2296                                         errno);
2297                                 conf = get_multipath_config();
2298                                 conf->strict_timing = 0;
2299                                 put_multipath_config(conf);
2300                                 break;
2301                         }
2302                 }
2303         }
2304         pthread_cleanup_pop(1);
2305         return NULL;
2306 }
2307
2308 int
2309 configure (struct vectors * vecs)
2310 {
2311         struct multipath * mpp;
2312         struct path * pp;
2313         vector mpvec;
2314         int i, ret;
2315         struct config *conf;
2316         static int force_reload = FORCE_RELOAD_WEAK;
2317
2318         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2319                 condlog(0, "couldn't allocate path vec in configure");
2320                 return 1;
2321         }
2322
2323         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2324                 condlog(0, "couldn't allocate multipath vec in configure");
2325                 return 1;
2326         }
2327
2328         if (!(mpvec = vector_alloc())) {
2329                 condlog(0, "couldn't allocate new maps vec in configure");
2330                 return 1;
2331         }
2332
2333         /*
2334          * probe for current path (from sysfs) and map (from dm) sets
2335          */
2336         ret = path_discovery(vecs->pathvec, DI_ALL);
2337         if (ret < 0) {
2338                 condlog(0, "configure failed at path discovery");
2339                 goto fail;
2340         }
2341
2342         conf = get_multipath_config();
2343         pthread_cleanup_push(put_multipath_config, conf);
2344         vector_foreach_slot (vecs->pathvec, pp, i){
2345                 if (filter_path(conf, pp) > 0){
2346                         vector_del_slot(vecs->pathvec, i);
2347                         free_path(pp);
2348                         i--;
2349                 }
2350         }
2351         pthread_cleanup_pop(1);
2352
2353         if (map_discovery(vecs)) {
2354                 condlog(0, "configure failed at map discovery");
2355                 goto fail;
2356         }
2357
2358         /*
2359          * create new set of maps & push changed ones into dm
2360          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2361          * superfluous ACT_RELOAD ioctls. Later calls are done
2362          * with FORCE_RELOAD_YES.
2363          */
2364         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2365         if (force_reload == FORCE_RELOAD_WEAK)
2366                 force_reload = FORCE_RELOAD_YES;
2367         if (ret) {
2368                 condlog(0, "configure failed while coalescing paths");
2369                 goto fail;
2370         }
2371
2372         /*
2373          * may need to remove some maps which are no longer relevant
2374          * e.g., due to blacklist changes in conf file
2375          */
2376         if (coalesce_maps(vecs, mpvec)) {
2377                 condlog(0, "configure failed while coalescing maps");
2378                 goto fail;
2379         }
2380
2381         dm_lib_release();
2382
2383         sync_maps_state(mpvec);
2384         vector_foreach_slot(mpvec, mpp, i){
2385                 if (remember_wwid(mpp->wwid) == 1)
2386                         trigger_paths_udev_change(mpp, true);
2387                 update_map_pr(mpp);
2388         }
2389
2390         /*
2391          * purge dm of old maps
2392          */
2393         remove_maps(vecs);
2394
2395         /*
2396          * save new set of maps formed by considering current path state
2397          */
2398         vector_free(vecs->mpvec);
2399         vecs->mpvec = mpvec;
2400
2401         /*
2402          * start dm event waiter threads for these new maps
2403          */
2404         vector_foreach_slot(vecs->mpvec, mpp, i) {
2405                 if (wait_for_events(mpp, vecs)) {
2406                         remove_map(mpp, vecs, 1);
2407                         i--;
2408                         continue;
2409                 }
2410                 if (setup_multipath(vecs, mpp))
2411                         i--;
2412         }
2413         return 0;
2414
2415 fail:
2416         vector_free(mpvec);
2417         return 1;
2418 }
2419
2420 int
2421 need_to_delay_reconfig(struct vectors * vecs)
2422 {
2423         struct multipath *mpp;
2424         int i;
2425
2426         if (!VECTOR_SIZE(vecs->mpvec))
2427                 return 0;
2428
2429         vector_foreach_slot(vecs->mpvec, mpp, i) {
2430                 if (mpp->wait_for_udev)
2431                         return 1;
2432         }
2433         return 0;
2434 }
2435
2436 void rcu_free_config(struct rcu_head *head)
2437 {
2438         struct config *conf = container_of(head, struct config, rcu);
2439
2440         free_config(conf);
2441 }
2442
2443 int
2444 reconfigure (struct vectors * vecs)
2445 {
2446         struct config * old, *conf;
2447
2448         conf = load_config(DEFAULT_CONFIGFILE);
2449         if (!conf)
2450                 return 1;
2451
2452         /*
2453          * free old map and path vectors ... they use old conf state
2454          */
2455         if (VECTOR_SIZE(vecs->mpvec))
2456                 remove_maps_and_stop_waiters(vecs);
2457
2458         free_pathvec(vecs->pathvec, FREE_PATHS);
2459         vecs->pathvec = NULL;
2460         delete_all_foreign();
2461
2462         /* Re-read any timezone changes */
2463         tzset();
2464
2465         dm_tgt_version(conf->version, TGT_MPATH);
2466         if (verbosity)
2467                 conf->verbosity = verbosity;
2468         if (bindings_read_only)
2469                 conf->bindings_read_only = bindings_read_only;
2470         uxsock_timeout = conf->uxsock_timeout;
2471
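        /*
         * Publish the new configuration via RCU; the old one is freed by
         * rcu_free_config() once all readers have dropped their references.
         */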
2472         old = rcu_dereference(multipath_conf);
2473         rcu_assign_pointer(multipath_conf, conf);
2474         call_rcu(&old->rcu, rcu_free_config);
2475
2476         configure(vecs);
2477
2479         return 0;
2480 }
2481
2482 static struct vectors *
2483 init_vecs (void)
2484 {
2485         struct vectors * vecs;
2486
2487         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2488
2489         if (!vecs)
2490                 return NULL;
2491
2492         pthread_mutex_init(&vecs->lock.mutex, NULL);
2493
2494         return vecs;
2495 }
2496
2497 static void *
2498 signal_set(int signo, void (*func) (int))
2499 {
2500         int r;
2501         struct sigaction sig;
2502         struct sigaction osig;
2503
2504         sig.sa_handler = func;
2505         sigemptyset(&sig.sa_mask);
2506         sig.sa_flags = 0;
2507
2508         r = sigaction(signo, &sig, &osig);
2509
2510         if (r < 0)
2511                 return (SIG_ERR);
2512         else
2513                 return (osig.sa_handler);
2514 }
2515
2516 void
2517 handle_signals(bool nonfatal)
2518 {
2519         if (exit_sig) {
2520                 condlog(2, "exit (signal)");
2521                 exit_sig = 0;
2522                 exit_daemon();
2523         }
2524         if (!nonfatal)
2525                 return;
2526         if (reconfig_sig) {
2527                 condlog(2, "reconfigure (signal)");
2528                 set_config_state(DAEMON_CONFIGURE);
2529         }
2530         if (log_reset_sig) {
2531                 condlog(2, "reset log (signal)");
2532                 if (logsink == 1)
2533                         log_thread_reset();
2534         }
2535         reconfig_sig = 0;
2536         log_reset_sig = 0;
2537 }
2538
2539 static void
2540 sighup (int sig)
2541 {
2542         reconfig_sig = 1;
2543 }
2544
2545 static void
2546 sigend (int sig)
2547 {
2548         exit_sig = 1;
2549 }
2550
2551 static void
2552 sigusr1 (int sig)
2553 {
2554         log_reset_sig = 1;
2555 }
2556
2557 static void
2558 sigusr2 (int sig)
2559 {
2560         condlog(3, "SIGUSR2 received");
2561 }
2562
2563 static void
2564 signal_init(void)
2565 {
2566         sigset_t set;
2567
2568         /* block all signals */
2569         sigfillset(&set);
2570         /* SIGPIPE occurs if logging fails */
2571         sigdelset(&set, SIGPIPE);
2572         pthread_sigmask(SIG_SETMASK, &set, NULL);
2573
2574         /* Other signals will be unblocked in the uxlsnr thread */
2575         signal_set(SIGHUP, sighup);
2576         signal_set(SIGUSR1, sigusr1);
2577         signal_set(SIGUSR2, sigusr2);
2578         signal_set(SIGINT, sigend);
2579         signal_set(SIGTERM, sigend);
2580         signal_set(SIGPIPE, sigend);
2581 }
2582
2583 static void
2584 setscheduler (void)
2585 {
2586         int res;
2587         static struct sched_param sched_param = {
2588                 .sched_priority = 99
2589         };
2590
2591         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2592
2593         if (res == -1)
2594                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2595         return;
2596 }
2597
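/*
 * Prefer the newer oom_score_adj interface and fall back to the legacy
 * oom_adj file if it is missing; skip both when systemd already applied
 * an OOMScoreAdjust setting.
 */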
2598 static void
2599 set_oom_adj (void)
2600 {
2601 #ifdef OOM_SCORE_ADJ_MIN
2602         int retry = 1;
2603         char *file = "/proc/self/oom_score_adj";
2604         int score = OOM_SCORE_ADJ_MIN;
2605 #else
2606         int retry = 0;
2607         char *file = "/proc/self/oom_adj";
2608         int score = OOM_ADJUST_MIN;
2609 #endif
2610         FILE *fp;
2611         struct stat st;
2612         char *envp;
2613
2614         envp = getenv("OOMScoreAdjust");
2615         if (envp) {
2616                 condlog(3, "Using systemd provided OOMScoreAdjust");
2617                 return;
2618         }
2619         do {
2620                 if (stat(file, &st) == 0){
2621                         fp = fopen(file, "w");
2622                         if (!fp) {
2623                                 condlog(0, "couldn't fopen %s : %s", file,
2624                                         strerror(errno));
2625                                 return;
2626                         }
2627                         fprintf(fp, "%i", score);
2628                         fclose(fp);
2629                         return;
2630                 }
2631                 if (errno != ENOENT) {
2632                         condlog(0, "couldn't stat %s : %s", file,
2633                                 strerror(errno));
2634                         return;
2635                 }
2636 #ifdef OOM_ADJUST_MIN
2637                 file = "/proc/self/oom_adj";
2638                 score = OOM_ADJUST_MIN;
2639 #else
2640                 retry = 0;
2641 #endif
2642         } while (retry--);
2643         condlog(0, "couldn't adjust oom score");
2644 }
2645
2646 static int
2647 child (void * param)
2648 {
2649         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2650         pthread_attr_t log_attr, misc_attr, uevent_attr;
2651         struct vectors * vecs;
2652         struct multipath * mpp;
2653         int i;
2654 #ifdef USE_SYSTEMD
2655         unsigned long checkint;
2656         int startup_done = 0;
2657 #endif
2658         int rc;
2659         int pid_fd = -1;
2660         struct config *conf;
2661         char *envp;
2662         int queue_without_daemon;
2663
2664         mlockall(MCL_CURRENT | MCL_FUTURE);
2665         signal_init();
2666         rcu_init();
2667
2668         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2669         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2670         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2671         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2672
2673         if (logsink == 1) {
2674                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2675                 log_thread_start(&log_attr);
2676                 pthread_attr_destroy(&log_attr);
2677         }
2678         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2679         if (pid_fd < 0) {
2680                 condlog(1, "failed to create pidfile");
2681                 if (logsink == 1)
2682                         log_thread_stop();
2683                 exit(1);
2684         }
2685
2686         post_config_state(DAEMON_START);
2687
2688         condlog(2, "--------start up--------");
2689         condlog(2, "read " DEFAULT_CONFIGFILE);
2690
2691         conf = load_config(DEFAULT_CONFIGFILE);
2692         if (!conf)
2693                 goto failed;
2694
2695         if (verbosity)
2696                 conf->verbosity = verbosity;
2697         if (bindings_read_only)
2698                 conf->bindings_read_only = bindings_read_only;
2699         uxsock_timeout = conf->uxsock_timeout;
2700         rcu_assign_pointer(multipath_conf, conf);
2701         if (init_checkers(conf->multipath_dir)) {
2702                 condlog(0, "failed to initialize checkers");
2703                 goto failed;
2704         }
2705         if (init_prio(conf->multipath_dir)) {
2706                 condlog(0, "failed to initialize prioritizers");
2707                 goto failed;
2708         }
2709         /* Failing this is non-fatal */
2710
2711         init_foreign(conf->multipath_dir);
2712
2713         if (poll_dmevents)
2714                 poll_dmevents = dmevent_poll_supported();
2715         setlogmask(LOG_UPTO(conf->verbosity + 3));
2716
2717         envp = getenv("LimitNOFILE");
2718
2719         if (envp)
2720                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2721         else
2722                 set_max_fds(conf->max_fds);
2723
2724         vecs = gvecs = init_vecs();
2725         if (!vecs)
2726                 goto failed;
2727
2728         setscheduler();
2729         set_oom_adj();
2730
2731 #ifdef USE_SYSTEMD
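        /*
         * When systemd arms a watchdog, derive max_checkint from its timeout
         * so the checker loop's sd_notify(WATCHDOG=1) always arrives in time.
         */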
2732         envp = getenv("WATCHDOG_USEC");
2733         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2734                 /* Value is in microseconds */
2735                 conf->max_checkint = checkint / 1000000;
2736                 /* Rescale checkint */
2737                 if (conf->checkint > conf->max_checkint)
2738                         conf->checkint = conf->max_checkint;
2739                 else
2740                         conf->checkint = conf->max_checkint / 4;
2741                 condlog(3, "enabling watchdog, interval %d max %d",
2742                         conf->checkint, conf->max_checkint);
2743                 use_watchdog = conf->checkint;
2744         }
2745 #endif
2746         /*
2747          * Startup done, invalidate configuration
2748          */
2749         conf = NULL;
2750
2751         pthread_cleanup_push(config_cleanup, NULL);
2752         pthread_mutex_lock(&config_lock);
2753
2754         __post_config_state(DAEMON_IDLE);
2755         rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
2756         if (!rc) {
2757                 /* Wait for uxlsnr startup */
2758                 while (running_state == DAEMON_IDLE)
2759                         pthread_cond_wait(&config_cond, &config_lock);
2760         }
2761         pthread_cleanup_pop(1);
2762
2763         if (rc) {
2764                 condlog(0, "failed to create cli listener: %d", rc);
2765                 goto failed;
2766         }
2767         else if (running_state != DAEMON_CONFIGURE) {
2768                 condlog(0, "cli listener failed to start");
2769                 goto failed;
2770         }
2771
2772         if (poll_dmevents) {
2773                 if (init_dmevent_waiter(vecs)) {
2774                         condlog(0, "failed to allocate dmevents waiter info");
2775                         goto failed;
2776                 }
2777                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2778                                          wait_dmevents, NULL))) {
2779                         condlog(0, "failed to create dmevent waiter thread: %d",
2780                                 rc);
2781                         goto failed;
2782                 }
2783         }
2784
2785         /*
2786          * Start uevent listener early to catch events
2787          */
2788         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2789                 condlog(0, "failed to create uevent thread: %d", rc);
2790                 goto failed;
2791         }
2792         pthread_attr_destroy(&uevent_attr);
2793
2794         /*
2795          * start threads
2796          */
2797         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2798                 condlog(0,"failed to create checker loop thread: %d", rc);
2799                 goto failed;
2800         }
2801         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2802                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2803                 goto failed;
2804         }
2805         pthread_attr_destroy(&misc_attr);
2806
2807         while (running_state != DAEMON_SHUTDOWN) {
2808                 pthread_cleanup_push(config_cleanup, NULL);
2809                 pthread_mutex_lock(&config_lock);
2810                 if (running_state != DAEMON_CONFIGURE &&
2811                     running_state != DAEMON_SHUTDOWN) {
2812                         pthread_cond_wait(&config_cond, &config_lock);
2813                 }
2814                 pthread_cleanup_pop(1);
2815                 if (running_state == DAEMON_CONFIGURE) {
2816                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2817                         lock(&vecs->lock);
2818                         pthread_testcancel();
2819                         if (!need_to_delay_reconfig(vecs)) {
2820                                 reconfigure(vecs);
2821                         } else {
2822                                 conf = get_multipath_config();
2823                                 conf->delayed_reconfig = 1;
2824                                 put_multipath_config(conf);
2825                         }
2826                         lock_cleanup_pop(vecs->lock);
2827                         post_config_state(DAEMON_IDLE);
2828 #ifdef USE_SYSTEMD
2829                         if (!startup_done) {
2830                                 sd_notify(0, "READY=1");
2831                                 startup_done = 1;
2832                         }
2833 #endif
2834                 }
2835         }
2836
2837         lock(&vecs->lock);
2838         conf = get_multipath_config();
2839         queue_without_daemon = conf->queue_without_daemon;
2840         put_multipath_config(conf);
2841         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2842                 vector_foreach_slot(vecs->mpvec, mpp, i)
2843                         dm_queue_if_no_path(mpp->alias, 0);
2844         remove_maps_and_stop_waiters(vecs);
2845         unlock(&vecs->lock);
2846
2847         pthread_cancel(check_thr);
2848         pthread_cancel(uevent_thr);
2849         pthread_cancel(uxlsnr_thr);
2850         pthread_cancel(uevq_thr);
2851         if (poll_dmevents)
2852                 pthread_cancel(dmevent_thr);
2853
2854         pthread_join(check_thr, NULL);
2855         pthread_join(uevent_thr, NULL);
2856         pthread_join(uxlsnr_thr, NULL);
2857         pthread_join(uevq_thr, NULL);
2858         if (poll_dmevents)
2859                 pthread_join(dmevent_thr, NULL);
2860
2861         stop_io_err_stat_thread();
2862
2863         lock(&vecs->lock);
2864         free_pathvec(vecs->pathvec, FREE_PATHS);
2865         vecs->pathvec = NULL;
2866         unlock(&vecs->lock);
2867
2868         pthread_mutex_destroy(&vecs->lock.mutex);
2869         FREE(vecs);
2870         vecs = NULL;
2871
2872         cleanup_foreign();
2873         cleanup_checkers();
2874         cleanup_prio();
2875         if (poll_dmevents)
2876                 cleanup_dmevent_waiter();
2877
2878         dm_lib_release();
2879         dm_lib_exit();
2880
2881         /* We're done here */
2882         condlog(3, "unlink pidfile");
2883         unlink(DEFAULT_PIDFILE);
2884
2885         condlog(2, "--------shut down-------");
2886
2887         if (logsink == 1)
2888                 log_thread_stop();
2889
2890         /*
2891          * Freeing config must be done after condlog() and dm_lib_exit(),
2892          * because logging functions like dlog() and dm_write_log()
2893          * reference the config.
2894          */
2895         conf = rcu_dereference(multipath_conf);
2896         rcu_assign_pointer(multipath_conf, NULL);
2897         call_rcu(&conf->rcu, rcu_free_config);
2898         udev_unref(udev);
2899         udev = NULL;
2900         pthread_attr_destroy(&waiter_attr);
2901         pthread_attr_destroy(&io_err_stat_attr);
2902 #ifdef _DEBUG_
2903         dbg_free_final(NULL);
2904 #endif
2905
2906 #ifdef USE_SYSTEMD
2907         sd_notify(0, "ERRNO=0");
2908 #endif
2909         exit(0);
2910
2911 failed:
2912 #ifdef USE_SYSTEMD
2913         sd_notify(0, "ERRNO=1");
2914 #endif
2915         if (pid_fd >= 0)
2916                 close(pid_fd);
2917         exit(1);
2918 }
2919
2920 static int
2921 daemonize(void)
2922 {
2923         int pid;
2924         int dev_null_fd;
2925
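        /*
         * Classic double fork: the first child calls setsid() to detach from
         * the controlling terminal, the second fork guarantees the daemon can
         * never reacquire one.
         */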
2926         if( (pid = fork()) < 0){
2927                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2928                 return -1;
2929         }
2930         else if (pid != 0)
2931                 return pid;
2932
2933         setsid();
2934
2935         if ( (pid = fork()) < 0)
2936                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2937         else if (pid != 0)
2938                 _exit(0);
2939
2940         if (chdir("/") < 0)
2941                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2942
2943         dev_null_fd = open("/dev/null", O_RDWR);
2944         if (dev_null_fd < 0){
2945                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2946                         strerror(errno));
2947                 _exit(0);
2948         }
2949
2950         close(STDIN_FILENO);
2951         if (dup(dev_null_fd) < 0) {
2952                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2953                         strerror(errno));
2954                 _exit(0);
2955         }
2956         close(STDOUT_FILENO);
2957         if (dup(dev_null_fd) < 0) {
2958                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2959                         strerror(errno));
2960                 _exit(0);
2961         }
2962         close(STDERR_FILENO);
2963         if (dup(dev_null_fd) < 0) {
2964                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2965                         strerror(errno));
2966                 _exit(0);
2967         }
2968         close(dev_null_fd);
2969         daemon_pid = getpid();
2970         return 0;
2971 }
2972
2973 int
2974 main (int argc, char *argv[])
2975 {
2976         extern char *optarg;
2977         extern int optind;
2978         int arg;
2979         int err;
2980         int foreground = 0;
2981         struct config *conf;
2982
2983         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2984                                    "Manipulated through RCU");
2985         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2986                 "Suppress complaints about unprotected running_state reads");
2987         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2988                 "Suppress complaints about this scalar variable");
2989
2990         logsink = 1;
2991
2992         if (getuid() != 0) {
2993                 fprintf(stderr, "need to be root\n");
2994                 exit(1);
2995         }
2996
2997         /* make sure we don't lock any path */
2998         if (chdir("/") < 0)
2999                 fprintf(stderr, "can't chdir to root directory : %s\n",
3000                         strerror(errno));
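        /*
         * Combine the inherited umask with 022 so files created by the daemon
         * are never group- or world-writable.
         */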
3001         umask(umask(077) | 022);
3002
3003         pthread_cond_init_mono(&config_cond);
3004
3005         udev = udev_new();
3006         libmp_udev_set_sync_support(0);
3007
3008         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
3009                 switch(arg) {
3010                 case 'd':
3011                         foreground = 1;
3012                         if (logsink > 0)
3013                                 logsink = 0;
3014                         //debug=1; /* ### comment me out ### */
3015                         break;
3016                 case 'v':
3017                         /* verbosity level must be numeric */
3018                         if (!isdigit(optarg[0]))
3019                                 exit(1);
3020
3021                         verbosity = atoi(optarg);
3022                         break;
3023                 case 's':
3024                         logsink = -1;
3025                         break;
3026                 case 'k':
3027                         logsink = 0;
3028                         conf = load_config(DEFAULT_CONFIGFILE);
3029                         if (!conf)
3030                                 exit(1);
3031                         if (verbosity)
3032                                 conf->verbosity = verbosity;
3033                         uxsock_timeout = conf->uxsock_timeout;
3034                         err = uxclnt(optarg, uxsock_timeout + 100);
3035                         free_config(conf);
3036                         return err;
3037                 case 'B':
3038                         bindings_read_only = 1;
3039                         break;
3040                 case 'n':
3041                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3042                         break;
3043                 case 'w':
3044                         poll_dmevents = 0;
3045                         break;
3046                 default:
3047                         fprintf(stderr, "Invalid argument '-%c'\n",
3048                                 optopt);
3049                         exit(1);
3050                 }
3051         }
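        /*
         * Any remaining non-option arguments are joined into a single
         * command and sent to the running daemon over its unix socket,
         * just like an interactive "-k" command.
         */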
3052         if (optind < argc) {
3053                 char cmd[CMDSIZE];
3054                 char * s = cmd;
3055                 char * c = s;
3056
3057                 logsink = 0;
3058                 conf = load_config(DEFAULT_CONFIGFILE);
3059                 if (!conf)
3060                         exit(1);
3061                 if (verbosity)
3062                         conf->verbosity = verbosity;
3063                 uxsock_timeout = conf->uxsock_timeout;
3064                 memset(cmd, 0x0, CMDSIZE);
3065                 while (optind < argc) {
3066                         if (strchr(argv[optind], ' '))
3067                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3068                         else
3069                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3070                         optind++;
3071                 }
3072                 c += snprintf(c, s + CMDSIZE - c, "\n");
3073                 err = uxclnt(s, uxsock_timeout + 100);
3074                 free_config(conf);
3075                 return err;
3076         }
3077
3078         if (foreground) {
3079                 if (!isatty(fileno(stdout)))
3080                         setbuf(stdout, NULL);
3081                 err = 0;
3082                 daemon_pid = getpid();
3083         } else
3084                 err = daemonize();
3085
3086         if (err < 0)
3087                 /* error */
3088                 exit(1);
3089         else if (err > 0)
3090                 /* parent dies */
3091                 exit(0);
3092         else
3093                 /* child lives */
3094                 return (child(NULL));
3095 }
3096
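/*
 * Persistent-reservation worker: read the keys registered on the device
 * (PR IN, READ KEYS service action), check whether the map's configured
 * reservation_key is among them, and if so register that key through this
 * path as well using the REGISTER AND IGNORE EXISTING KEY service action.
 */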
3097 void *mpath_pr_event_handler_fn(void *pathp)
3098 {
3099         struct multipath * mpp;
3100         int i, ret, isFound;
3101         struct path * pp = (struct path *)pathp;
3102         struct prout_param_descriptor *param;
3103         struct prin_resp *resp;
3104
3105         rcu_register_thread();
3106         mpp = pp->mpp;
3107
3108         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3109         if (!resp) {
3110                 condlog(0, "%s: Alloc failed for prin response", pp->dev);
3111                 goto out;
3112         }
3113
3114         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3115         if (ret != MPATH_PR_SUCCESS)
3116         {
3117                 condlog(0, "%s: pr in read keys service action failed. Error=%d", pp->dev, ret);
3118                 goto out;
3119         }
3120
3121         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
3122                         resp->prin_descriptor.prin_readkeys.additional_length);
3123
3124         if (resp->prin_descriptor.prin_readkeys.additional_length == 0)
3125         {
3126                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3127                 ret = MPATH_PR_SUCCESS;
3128                 goto out;
3129         }
3130         condlog(2, "Multipath reservation_key: 0x%" PRIx64,
3131                 get_be64(mpp->reservation_key));
3132
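        /* the returned key list holds one 8-byte key per entry; additional_length is its size in bytes */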
3133         isFound = 0;
3134         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++)
3135         {
3136                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
3137                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8, -1);
3138                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8))
3139                 {
3140                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3141                         isFound = 1;
3142                         break;
3143                 }
3144         }
3145         if (!isFound)
3146         {
3147                 condlog(0, "%s: device is not registered, or the host is not authorised for registration; skipping path",
3148                         pp->dev);
3149                 ret = MPATH_PR_OTHER;
3150                 goto out;
3151         }
3152
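        /*
         * Build the PR OUT parameter list: the map's reservation_key is
         * placed in the service action reservation key field, and no
         * transport IDs are passed.
         */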
3153         param = calloc(1, sizeof(struct prout_param_descriptor));
3154         if (!param)
3155                 goto out;
3156         param->sa_flags = mpp->sa_flags;
3157         memcpy(param->sa_key, &mpp->reservation_key, 8);
3158
3159         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3160
3161         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3162         if (ret != MPATH_PR_SUCCESS)
3163         {
3164                 condlog(0, "%s: Reservation registration failed. Error: %d", pp->dev, ret);
3165         }
3166         mpp->prflag = 1;
3167
3168         free(param);
3169 out:
3170         if (resp)
3171                 free(resp);
3172         rcu_unregister_thread();
3173         return NULL;
3174 }
3175
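/*
 * Persistent-reservation handling for a path: only SCSI paths belonging to
 * a map with a configured reservation_key are considered. The actual work
 * is done by mpath_pr_event_handler_fn(), run in a separate thread that is
 * joined before returning.
 */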
3176 int mpath_pr_event_handle(struct path *pp)
3177 {
3178         pthread_t thread;
3179         int rc;
3180         pthread_attr_t attr;
3181         struct multipath * mpp;
3182
3183         if (pp->bus != SYSFS_BUS_SCSI)
3184                 return 0;
3185
3186         mpp = pp->mpp;
3187
3188         if (!get_be64(mpp->reservation_key))
3189                 return -1;
3190
3191         pthread_attr_init(&attr);
3192         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3193
3194         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
3195         pthread_attr_destroy(&attr);
3196         if (rc) {
3197                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3198                 return -1;
3199         }
3200         pthread_join(thread, NULL);
3201         return 0;
3202 }