multipathd: ignore failed wwid recheck
multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69 #include "uxsock.h"
70
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
73
74 #include "prioritizers/alua_rtpg.h"
75
76 #include "main.h"
77 #include "pidfile.h"
78 #include "uxlsnr.h"
79 #include "uxclnt.h"
80 #include "cli.h"
81 #include "cli_handlers.h"
82 #include "lock.h"
83 #include "waiter.h"
84 #include "dmevents.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
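/*
 * Log the checker verdict for a path at verbosity "lvl".  Nothing is
 * printed unless the path belongs to a map, a checker has been selected
 * and lvl does not exceed the requested verbosity; offline paths get a
 * fixed message, otherwise the checker's own message (if any) is appended
 * to the checker name.
 */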
93 #define LOG_MSG(lvl, verb, pp)                                  \
94 do {                                                            \
95         if (pp->mpp && checker_selected(&pp->checker) &&        \
96             lvl <= verb) {                                      \
97                 if (pp->offline)                                \
98                         condlog(lvl, "%s: %s - path offline",   \
99                                 pp->mpp->alias, pp->dev);       \
100                 else  {                                         \
101                         const char *__m =                       \
102                                 checker_message(&pp->checker);  \
103                                                                 \
104                         if (strlen(__m))                              \
105                                 condlog(lvl, "%s: %s - %s checker%s", \
106                                         pp->mpp->alias,               \
107                                         pp->dev,                      \
108                                         checker_name(&pp->checker),   \
109                                         __m);                         \
110                 }                                                     \
111         }                                                             \
112 } while(0)
113
114 struct mpath_event_param
115 {
116         char * devname;
117         struct multipath *mpp;
118 };
119
120 int logsink;
121 int uxsock_timeout;
122 int verbosity;
123 int bindings_read_only;
124 int ignore_new_devs;
125 #ifdef NO_DMEVENTS_POLL
126 int poll_dmevents = 0;
127 #else
128 int poll_dmevents = 1;
129 #endif
130 enum daemon_status running_state = DAEMON_INIT;
131 pid_t daemon_pid;
132 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
133 pthread_cond_t config_cond;
134
135 /*
136  * global copy of vecs for use in sig handlers
137  */
138 struct vectors * gvecs;
139
140 struct udev * udev;
141
142 struct config *multipath_conf;
143
144 /* Local variables */
145 static volatile sig_atomic_t exit_sig;
146 static volatile sig_atomic_t reconfig_sig;
147 static volatile sig_atomic_t log_reset_sig;
148
149 const char *
150 daemon_status(void)
151 {
152         switch (running_state) {
153         case DAEMON_INIT:
154                 return "init";
155         case DAEMON_START:
156                 return "startup";
157         case DAEMON_CONFIGURE:
158                 return "configure";
159         case DAEMON_IDLE:
160                 return "idle";
161         case DAEMON_RUNNING:
162                 return "running";
163         case DAEMON_SHUTDOWN:
164                 return "shutdown";
165         }
166         return NULL;
167 }
168
169 /*
170  * I love you too, systemd ...
171  */
172 const char *
173 sd_notify_status(void)
174 {
175         switch (running_state) {
176         case DAEMON_INIT:
177                 return "STATUS=init";
178         case DAEMON_START:
179                 return "STATUS=startup";
180         case DAEMON_CONFIGURE:
181                 return "STATUS=configure";
182         case DAEMON_IDLE:
183         case DAEMON_RUNNING:
184                 return "STATUS=up";
185         case DAEMON_SHUTDOWN:
186                 return "STATUS=shutdown";
187         }
188         return NULL;
189 }
190
191 #ifdef USE_SYSTEMD
192 static void do_sd_notify(enum daemon_status old_state)
193 {
194         /*
195          * Checkerloop switches back and forth between idle and running state.
196          * No need to tell systemd each time.
197          * These notifications cause a lot of overhead on dbus.
198          */
199         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
200             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
201                 return;
202         sd_notify(0, sd_notify_status());
203 }
204 #endif
205
206 static void config_cleanup(void *arg)
207 {
208         pthread_mutex_unlock(&config_lock);
209 }
210
211 static void __post_config_state(enum daemon_status state)
212 {
213         if (state != running_state && running_state != DAEMON_SHUTDOWN) {
214                 enum daemon_status old_state = running_state;
215
216                 running_state = state;
217                 pthread_cond_broadcast(&config_cond);
218 #ifdef USE_SYSTEMD
219                 do_sd_notify(old_state);
220 #endif
221         }
222 }
223
224 void post_config_state(enum daemon_status state)
225 {
226         pthread_mutex_lock(&config_lock);
227         pthread_cleanup_push(config_cleanup, NULL);
228         __post_config_state(state);
229         pthread_cleanup_pop(1);
230 }
231
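/*
 * Request a transition of running_state.  Unlike post_config_state(),
 * this may have to wait: if the daemon is neither idle nor already in the
 * requested state, wait up to one second for the state to change before
 * applying the transition.  Returns 0 on success, EINVAL during shutdown,
 * or the pthread_cond_timedwait() error code.
 */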
232 int set_config_state(enum daemon_status state)
233 {
234         int rc = 0;
235
236         pthread_cleanup_push(config_cleanup, NULL);
237         pthread_mutex_lock(&config_lock);
238         if (running_state != state) {
239                 enum daemon_status old_state = running_state;
240
241                 if (running_state == DAEMON_SHUTDOWN)
242                         rc = EINVAL;
243                 else if (running_state != DAEMON_IDLE) {
244                         struct timespec ts;
245
246                         if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
247                                 ts.tv_sec += 1;
248                                 rc = pthread_cond_timedwait(&config_cond,
249                                                             &config_lock, &ts);
250                         }
251                 }
252                 if (!rc) {
253                         running_state = state;
254                         pthread_cond_broadcast(&config_cond);
255 #ifdef USE_SYSTEMD
256                         do_sd_notify(old_state);
257 #endif
258                 }
259         }
260         pthread_cleanup_pop(1);
261         return rc;
262 }
263
264 struct config *get_multipath_config(void)
265 {
266         rcu_read_lock();
267         return rcu_dereference(multipath_conf);
268 }
269
270 void put_multipath_config(void *arg)
271 {
272         rcu_read_unlock();
273 }
274
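/*
 * Return 1 if mpp should switch to a better path group.  When "refresh"
 * is set, path priorities are re-read first.  The best path group is
 * recomputed and compared with the currently scheduled next path group;
 * maps with manual failback never switch automatically.
 */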
275 static int
276 need_switch_pathgroup (struct multipath * mpp, int refresh)
277 {
278         struct pathgroup * pgp;
279         struct path * pp;
280         unsigned int i, j;
281         struct config *conf;
282         int bestpg;
283
284         if (!mpp)
285                 return 0;
286
287         /*
288          * Refresh path priority values
289          */
290         if (refresh) {
291                 vector_foreach_slot (mpp->pg, pgp, i) {
292                         vector_foreach_slot (pgp->paths, pp, j) {
293                                 conf = get_multipath_config();
294                                 pthread_cleanup_push(put_multipath_config,
295                                                      conf);
296                                 pathinfo(pp, conf, DI_PRIO);
297                                 pthread_cleanup_pop(1);
298                         }
299                 }
300         }
301
302         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
303                 return 0;
304
305         bestpg = select_path_group(mpp);
306         if (mpp->pgfailback == -FAILBACK_MANUAL)
307                 return 0;
308
309         mpp->bestpg = bestpg;
310         if (mpp->bestpg != mpp->nextpg)
311                 return 1;
312
313         return 0;
314 }
315
316 static void
317 switch_pathgroup (struct multipath * mpp)
318 {
319         mpp->stat_switchgroup++;
320         dm_switchgroup(mpp->alias, mpp->bestpg);
321         condlog(2, "%s: switch to path group #%i",
322                  mpp->alias, mpp->bestpg);
323 }
324
325 static int
326 wait_for_events(struct multipath *mpp, struct vectors *vecs)
327 {
328         if (poll_dmevents)
329                 return watch_dmevents(mpp->alias);
330         else
331                 return start_waiter_thread(mpp, vecs);
332 }
333
334 static void
335 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
336 {
337         /* devices are automatically removed by the dmevent polling code,
338          * so they don't need to be manually removed here */
339         condlog(3, "%s: removing map from internal tables", mpp->alias);
340         if (!poll_dmevents)
341                 stop_waiter_thread(mpp, vecs);
342         remove_map(mpp, vecs, PURGE_VEC);
343 }
344
345 static void
346 remove_maps_and_stop_waiters(struct vectors *vecs)
347 {
348         int i;
349         struct multipath * mpp;
350
351         if (!vecs)
352                 return;
353
354         if (!poll_dmevents) {
355                 vector_foreach_slot(vecs->mpvec, mpp, i)
356                         stop_waiter_thread(mpp, vecs);
357         }
358         else
359                 unwatch_all_dmevents();
360
361         remove_maps(vecs);
362 }
363
364 static void
365 set_multipath_wwid (struct multipath * mpp)
366 {
367         if (strlen(mpp->wwid))
368                 return;
369
370         dm_get_uuid(mpp->alias, mpp->wwid);
371 }
372
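/*
 * Bring the map's queue_if_no_path setting in line with its no_path_retry
 * policy.  For a bounded retry count, queueing stays enabled while paths
 * are active; once all paths are gone and queueing is still on, recovery
 * mode is entered so that queueing can be turned off when the configured
 * retries run out.
 */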
373 static void set_no_path_retry(struct multipath *mpp)
374 {
375         char is_queueing = 0;
376
377         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
378         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
379                 is_queueing = 1;
380
381         switch (mpp->no_path_retry) {
382         case NO_PATH_RETRY_UNDEF:
383                 break;
384         case NO_PATH_RETRY_FAIL:
385                 if (is_queueing)
386                         dm_queue_if_no_path(mpp->alias, 0);
387                 break;
388         case NO_PATH_RETRY_QUEUE:
389                 if (!is_queueing)
390                         dm_queue_if_no_path(mpp->alias, 1);
391                 break;
392         default:
393                 if (mpp->nr_active > 0) {
394                         mpp->retry_tick = 0;
395                         if (!is_queueing)
396                                 dm_queue_if_no_path(mpp->alias, 1);
397                 } else if (is_queueing && mpp->retry_tick == 0)
398                         enter_recovery_mode(mpp);
399                 break;
400         }
401 }
402
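/*
 * Refresh mpp's device-mapper info and table/status strings from the
 * kernel.  If "reset" is set, also re-sync the queueing state and cancel
 * a pending deferred remove.  On failure the map is removed from the
 * internal tables, so the caller must not dereference mpp after a
 * non-zero return.
 */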
403 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
404                       int reset)
405 {
406         if (dm_get_info(mpp->alias, &mpp->dmi)) {
407                 /* Error accessing table */
408                 condlog(3, "%s: cannot access table", mpp->alias);
409                 goto out;
410         }
411
412         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
413                 condlog(0, "%s: failed to setup multipath", mpp->alias);
414                 goto out;
415         }
416
417         if (reset) {
418                 set_no_path_retry(mpp);
419                 if (VECTOR_SIZE(mpp->paths) != 0)
420                         dm_cancel_deferred_remove(mpp);
421         }
422
423         return 0;
424 out:
425         remove_map_and_stop_waiter(mpp, vecs);
426         return 1;
427 }
428
429 int update_multipath (struct vectors *vecs, char *mapname, int reset)
430 {
431         struct multipath *mpp;
432         struct pathgroup  *pgp;
433         struct path *pp;
434         int i, j;
435
436         mpp = find_mp_by_alias(vecs->mpvec, mapname);
437
438         if (!mpp) {
439                 condlog(3, "%s: multipath map not found", mapname);
440                 return 2;
441         }
442
443         if (__setup_multipath(vecs, mpp, reset))
444                 return 1; /* mpp freed in setup_multipath */
445
446         /*
447          * compare checkers states with DM states
448          */
449         vector_foreach_slot (mpp->pg, pgp, i) {
450                 vector_foreach_slot (pgp->paths, pp, j) {
451                         if (pp->dmstate != PSTATE_FAILED)
452                                 continue;
453
454                         if (pp->state != PATH_DOWN) {
455                                 struct config *conf;
456                                 int oldstate = pp->state;
457                                 int checkint;
458
459                                 conf = get_multipath_config();
460                                 checkint = conf->checkint;
461                                 put_multipath_config(conf);
462                                 condlog(2, "%s: mark as failed", pp->dev);
463                                 mpp->stat_path_failures++;
464                                 pp->state = PATH_DOWN;
465                                 if (oldstate == PATH_UP ||
466                                     oldstate == PATH_GHOST)
467                                         update_queue_mode_del_path(mpp);
468
469                                 /*
470                                  * if opportune,
471                                  * schedule the next check earlier
472                                  */
473                                 if (pp->tick > checkint)
474                                         pp->tick = checkint;
475                         }
476                 }
477         }
478         return 0;
479 }
480
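/*
 * Re-adopt paths and reload the device-mapper table for mpp, retrying a
 * failed reload up to three times.  For a map created without a path
 * (new_map), a hard failure removes the map again and 1 is returned.
 */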
481 static int
482 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
483 {
484         int retries = 3;
485         char params[PARAMS_SIZE] = {0};
486
487 retry:
488         condlog(4, "%s: updating new map", mpp->alias);
489         if (adopt_paths(vecs->pathvec, mpp)) {
490                 condlog(0, "%s: failed to adopt paths for new map update",
491                         mpp->alias);
492                 retries = -1;
493                 goto fail;
494         }
495         verify_paths(mpp, vecs);
496         mpp->action = ACT_RELOAD;
497
498         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
499                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
500                 retries = -1;
501                 goto fail;
502         }
503         if (domap(mpp, params, 1) == DOMAP_FAIL && retries-- > 0) {
504                 condlog(0, "%s: map_update sleep", mpp->alias);

505                 sleep(1);
506                 goto retry;
507         }
508         dm_lib_release();
509
510 fail:
511         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
512                 condlog(0, "%s: failed to create new map", mpp->alias);
513                 remove_map(mpp, vecs, 1);
514                 return 1;
515         }
516
517         if (setup_multipath(vecs, mpp))
518                 return 1;
519
520         sync_map_state(mpp);
521
522         if (retries < 0)
523                 condlog(0, "%s: failed reload in new map update", mpp->alias);
524         return 0;
525 }
526
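/*
 * Register a multipath map that already exists in the kernel but has no
 * paths in multipathd's pathvec yet (typically discovered via a
 * device-mapper uevent).  The map is looked up by alias, its WWID and
 * table are read from device-mapper, and update_map() is used to adopt
 * any matching paths.
 */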
527 static struct multipath *
528 add_map_without_path (struct vectors *vecs, const char *alias)
529 {
530         struct multipath * mpp = alloc_multipath();
531         struct config *conf;
532
533         if (!mpp)
534                 return NULL;
535         if (!alias) {
536                 FREE(mpp);
537                 return NULL;
538         }
539
540         mpp->alias = STRDUP(alias);
541
542         if (dm_get_info(mpp->alias, &mpp->dmi)) {
543                 condlog(3, "%s: cannot access table", mpp->alias);
544                 goto out;
545         }
546         set_multipath_wwid(mpp);
547         conf = get_multipath_config();
548         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
549         put_multipath_config(conf);
550
551         if (update_multipath_table(mpp, vecs->pathvec, 1))
552                 goto out;
553         if (update_multipath_status(mpp))
554                 goto out;
555
556         if (!vector_alloc_slot(vecs->mpvec))
557                 goto out;
558
559         vector_set_slot(vecs->mpvec, mpp);
560
561         if (update_map(mpp, vecs, 1) != 0) /* map removed */
562                 return NULL;
563
564         return mpp;
565 out:
566         remove_map(mpp, vecs, PURGE_VEC);
567         return NULL;
568 }
569
570 static int
571 coalesce_maps(struct vectors *vecs, vector nmpv)
572 {
573         struct multipath * ompp;
574         vector ompv = vecs->mpvec;
575         unsigned int i, reassign_maps;
576         struct config *conf;
577
578         conf = get_multipath_config();
579         reassign_maps = conf->reassign_maps;
580         put_multipath_config(conf);
581         vector_foreach_slot (ompv, ompp, i) {
582                 condlog(3, "%s: coalesce map", ompp->alias);
583                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
584                         /*
585                          * remove all current maps not allowed by the
586                          * current configuration
587                          */
588                         if (dm_flush_map(ompp->alias)) {
589                                 condlog(0, "%s: unable to flush devmap",
590                                         ompp->alias);
591                                 /*
592                                  * may be just because the device is open
593                                  */
594                                 if (setup_multipath(vecs, ompp) != 0) {
595                                         i--;
596                                         continue;
597                                 }
598                                 if (!vector_alloc_slot(nmpv))
599                                         return 1;
600
601                                 vector_set_slot(nmpv, ompp);
602
603                                 vector_del_slot(ompv, i);
604                                 i--;
605                         }
606                         else {
607                                 dm_lib_release();
608                                 condlog(2, "%s devmap removed", ompp->alias);
609                         }
610                 } else if (reassign_maps) {
611                         condlog(3, "%s: Reassign existing device-mapper"
612                                 " devices", ompp->alias);
613                         dm_reassign(ompp->alias);
614                 }
615         }
616         return 0;
617 }
618
619 static void
620 sync_maps_state(vector mpvec)
621 {
622         unsigned int i;
623         struct multipath *mpp;
624
625         vector_foreach_slot (mpvec, mpp, i)
626                 sync_map_state(mpp);
627 }
628
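/*
 * Remove the map from device-mapper ("nopaths" allows flushing even when
 * the map still has paths, honouring deferred remove).  Only on success
 * are the paths orphaned and the map dropped from the internal tables;
 * a failed flush (e.g. because the device is still open) leaves the map
 * in place.
 */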
629 static int
630 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
631 {
632         int r;
633
634         if (nopaths)
635                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
636         else
637                 r = dm_flush_map(mpp->alias);
638         /*
639          * clear references to this map after flushing, so we can ignore
640          * the spurious uevent we may have generated with the dm_flush_map call above
641          */
642         if (r) {
643                 /*
644                  * May not really be an error -- if the map was already flushed
645                  * from the device mapper by dmsetup(8) for instance.
646                  */
647                 if (r == 1)
648                         condlog(0, "%s: can't flush", mpp->alias);
649                 else {
650                         condlog(2, "%s: devmap deferred remove", mpp->alias);
651                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
652                 }
653                 return r;
654         }
655         else {
656                 dm_lib_release();
657                 condlog(2, "%s: map flushed", mpp->alias);
658         }
659
660         orphan_paths(vecs->pathvec, mpp, "map flushed");
661         remove_map_and_stop_waiter(mpp, vecs);
662
663         return 0;
664 }
665
666 static int
667 uev_add_map (struct uevent * uev, struct vectors * vecs)
668 {
669         char *alias;
670         int major = -1, minor = -1, rc;
671
672         condlog(3, "%s: add map (uevent)", uev->kernel);
673         alias = uevent_get_dm_name(uev);
674         if (!alias) {
675                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
676                 major = uevent_get_major(uev);
677                 minor = uevent_get_minor(uev);
678                 alias = dm_mapname(major, minor);
679                 if (!alias) {
680                         condlog(2, "%s: mapname not found for %d:%d",
681                                 uev->kernel, major, minor);
682                         return 1;
683                 }
684         }
685         pthread_cleanup_push(cleanup_lock, &vecs->lock);
686         lock(&vecs->lock);
687         pthread_testcancel();
688         rc = ev_add_map(uev->kernel, alias, vecs);
689         lock_cleanup_pop(vecs->lock);
690         FREE(alias);
691         return rc;
692 }
693
694 /*
695  * ev_add_map expects that the multipath device already exists in kernel
696  * before it is called. It just adds a device to multipathd or updates an
697  * existing device.
698  */
699 int
700 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
701 {
702         struct multipath * mpp;
703         int delayed_reconfig, reassign_maps;
704         struct config *conf;
705
706         if (dm_is_mpath(alias) != 1) {
707                 condlog(4, "%s: not a multipath map", alias);
708                 return 0;
709         }
710
711         mpp = find_mp_by_alias(vecs->mpvec, alias);
712
713         if (mpp) {
714                 if (mpp->wait_for_udev > 1) {
715                         condlog(2, "%s: performing delayed actions",
716                                 mpp->alias);
717                         if (update_map(mpp, vecs, 0))
718                                 /* setup_multipath removed the map */
719                                 return 1;
720                 }
721                 conf = get_multipath_config();
722                 delayed_reconfig = conf->delayed_reconfig;
723                 reassign_maps = conf->reassign_maps;
724                 put_multipath_config(conf);
725                 if (mpp->wait_for_udev) {
726                         mpp->wait_for_udev = 0;
727                         if (delayed_reconfig &&
728                             !need_to_delay_reconfig(vecs)) {
729                                 condlog(2, "reconfigure (delayed)");
730                                 set_config_state(DAEMON_CONFIGURE);
731                                 return 0;
732                         }
733                 }
734                 /*
735                  * Not really an error -- we generate our own uevent
736                  * if we create a multipath mapped device as a result
737                  * of uev_add_path
738                  */
739                 if (reassign_maps) {
740                         condlog(3, "%s: Reassign existing device-mapper devices",
741                                 alias);
742                         dm_reassign(alias);
743                 }
744                 return 0;
745         }
746         condlog(2, "%s: adding map", alias);
747
748         /*
749          * now we can register the map
750          */
751         if ((mpp = add_map_without_path(vecs, alias))) {
752                 sync_map_state(mpp);
753                 condlog(2, "%s: devmap %s registered", alias, dev);
754                 return 0;
755         } else {
756                 condlog(2, "%s: ev_add_map failed", dev);
757                 return 1;
758         }
759 }
760
761 static int
762 uev_remove_map (struct uevent * uev, struct vectors * vecs)
763 {
764         char *alias;
765         int minor;
766         struct multipath *mpp;
767
768         condlog(3, "%s: remove map (uevent)", uev->kernel);
769         alias = uevent_get_dm_name(uev);
770         if (!alias) {
771                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
772                 return 0;
773         }
774         minor = uevent_get_minor(uev);
775
776         pthread_cleanup_push(cleanup_lock, &vecs->lock);
777         lock(&vecs->lock);
778         pthread_testcancel();
779         mpp = find_mp_by_minor(vecs->mpvec, minor);
780
781         if (!mpp) {
782                 condlog(2, "%s: devmap not registered, can't remove",
783                         uev->kernel);
784                 goto out;
785         }
786         if (strcmp(mpp->alias, alias)) {
787                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
788                         uev->kernel, mpp->alias, alias);
789                 goto out;
790         }
791
792         remove_map_and_stop_waiter(mpp, vecs);
793 out:
794         lock_cleanup_pop(vecs->lock);
795         FREE(alias);
796         return 0;
797 }
798
799 /* Called from CLI handler */
800 int
801 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
802 {
803         struct multipath * mpp;
804
805         mpp = find_mp_by_minor(vecs->mpvec, minor);
806
807         if (!mpp) {
808                 condlog(2, "%s: devmap not registered, can't remove",
809                         devname);
810                 return 1;
811         }
812         if (strcmp(mpp->alias, alias)) {
813                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
814                         mpp->alias, mpp->dmi->minor, minor);
815                 return 1;
816         }
817         return flush_map(mpp, vecs, 0);
818 }
819
820 static int
821 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
822 {
823         struct path *pp;
824         int ret = 0, i;
825         struct config *conf;
826
827         condlog(3, "%s: add path (uevent)", uev->kernel);
828         if (strstr(uev->kernel, "..") != NULL) {
829                 /*
830                  * Don't allow relative device names in the pathvec
831                  */
832                 condlog(0, "%s: path name is invalid", uev->kernel);
833                 return 1;
834         }
835
836         pthread_cleanup_push(cleanup_lock, &vecs->lock);
837         lock(&vecs->lock);
838         pthread_testcancel();
839         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
840         if (pp) {
841                 int r;
842
843                 condlog(3, "%s: spurious uevent, path already in pathvec",
844                         uev->kernel);
845                 if (!pp->mpp && !strlen(pp->wwid)) {
846                         condlog(3, "%s: reinitialize path", uev->kernel);
847                         udev_device_unref(pp->udev);
848                         pp->udev = udev_device_ref(uev->udev);
849                         conf = get_multipath_config();
850                         pthread_cleanup_push(put_multipath_config, conf);
851                         r = pathinfo(pp, conf,
852                                      DI_ALL | DI_BLACKLIST);
853                         pthread_cleanup_pop(1);
854                         if (r == PATHINFO_OK)
855                                 ret = ev_add_path(pp, vecs, need_do_map);
856                         else if (r == PATHINFO_SKIPPED) {
857                                 condlog(3, "%s: remove blacklisted path",
858                                         uev->kernel);
859                                 i = find_slot(vecs->pathvec, (void *)pp);
860                                 if (i != -1)
861                                         vector_del_slot(vecs->pathvec, i);
862                                 free_path(pp);
863                         } else {
864                                 condlog(0, "%s: failed to reinitialize path",
865                                         uev->kernel);
866                                 ret = 1;
867                         }
868                 }
869         }
870         lock_cleanup_pop(vecs->lock);
871         if (pp)
872                 return ret;
873
874         /*
875          * get path vital state
876          */
877         conf = get_multipath_config();
878         pthread_cleanup_push(put_multipath_config, conf);
879         ret = alloc_path_with_pathinfo(conf, uev->udev,
880                                        uev->wwid, DI_ALL, &pp);
881         pthread_cleanup_pop(1);
882         if (!pp) {
883                 if (ret == PATHINFO_SKIPPED)
884                         return 0;
885                 condlog(3, "%s: failed to get path info", uev->kernel);
886                 return 1;
887         }
888         pthread_cleanup_push(cleanup_lock, &vecs->lock);
889         lock(&vecs->lock);
890         pthread_testcancel();
891         ret = store_path(vecs->pathvec, pp);
892         if (!ret) {
893                 conf = get_multipath_config();
894                 pp->checkint = conf->checkint;
895                 put_multipath_config(conf);
896                 ret = ev_add_path(pp, vecs, need_do_map);
897         } else {
898                 condlog(0, "%s: failed to store path info, "
899                         "dropping event",
900                         uev->kernel);
901                 free_path(pp);
902                 ret = 1;
903         }
904         lock_cleanup_pop(vecs->lock);
905         return ret;
906 }
907
908 /*
909  * returns:
910  * 0: added
911  * 1: error
912  */
913 int
914 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
915 {
916         struct multipath * mpp;
917         char params[PARAMS_SIZE] = {0};
918         int retries = 3;
919         int start_waiter = 0;
920         int ret;
921
922         /*
923          * need path UID to go any further
924          */
925         if (strlen(pp->wwid) == 0) {
926                 condlog(0, "%s: failed to get path uid", pp->dev);
927                 goto fail; /* leave path added to pathvec */
928         }
929         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
930         if (mpp && pp->size && mpp->size != pp->size) {
931                 condlog(0, "%s: failed to add new path %s, device size mismatch", mpp->alias, pp->dev);
932                 int i = find_slot(vecs->pathvec, (void *)pp);
933                 if (i != -1)
934                         vector_del_slot(vecs->pathvec, i);
935                 free_path(pp);
936                 return 1;
937         }
938         if (mpp && mpp->wait_for_udev &&
939             (pathcount(mpp, PATH_UP) > 0 ||
940              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
941               mpp->ghost_delay_tick <= 0))) {
942                 /* if wait_for_udev is set and valid paths exist */
943                 condlog(3, "%s: delaying path addition until %s is fully initialized",
944                         pp->dev, mpp->alias);
945                 mpp->wait_for_udev = 2;
946                 orphan_path(pp, "waiting for create to complete");
947                 return 0;
948         }
949
950         pp->mpp = mpp;
951 rescan:
952         if (mpp) {
953                 condlog(4,"%s: adopting all paths for path %s",
954                         mpp->alias, pp->dev);
955                 if (adopt_paths(vecs->pathvec, mpp))
956                         goto fail; /* leave path added to pathvec */
957
958                 verify_paths(mpp, vecs);
959                 mpp->action = ACT_RELOAD;
960         } else {
961                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
962                         orphan_path(pp, "only one path");
963                         return 0;
964                 }
965                 condlog(4,"%s: creating new map", pp->dev);
966                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
967                         mpp->action = ACT_CREATE;
968                         /*
969                          * We don't depend on ACT_CREATE, as domap will
970                          * set it to ACT_NOTHING when complete.
971                          */
972                         start_waiter = 1;
973                 }
974                 if (!start_waiter)
975                         goto fail; /* leave path added to pathvec */
976         }
977
978         /* persistent reservation check */
979         mpath_pr_event_handle(pp);
980
981         if (!need_do_map)
982                 return 0;
983
984         if (!dm_map_present(mpp->alias)) {
985                 mpp->action = ACT_CREATE;
986                 start_waiter = 1;
987         }
988         /*
989          * push the map to the device-mapper
990          */
991         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
992                 condlog(0, "%s: failed to setup map for addition of new "
993                         "path %s", mpp->alias, pp->dev);
994                 goto fail_map;
995         }
996         /*
997          * reload the map for the multipath mapped device
998          */
999         ret = domap(mpp, params, 1);
1000         while (ret == DOMAP_RETRY && retries-- > 0) {
1001                 condlog(0, "%s: retry domap for addition of new "
1002                         "path %s", mpp->alias, pp->dev);
1003                 sleep(1);
1004                 ret = domap(mpp, params, 1);
1005         }
1006         if (ret == DOMAP_FAIL || ret == DOMAP_RETRY) {
1007                 condlog(0, "%s: failed in domap for addition of new "
1008                         "path %s", mpp->alias, pp->dev);
1009                 /*
1010                  * deal with asynchronous uevents :((
1011                  */
1012                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1013                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
1014                         sleep(1);
1015                         update_mpp_paths(mpp, vecs->pathvec);
1016                         goto rescan;
1017                 }
1018                 else if (mpp->action == ACT_RELOAD)
1019                         condlog(0, "%s: giving up reload", mpp->alias);
1020                 else
1021                         goto fail_map;
1022         }
1023         dm_lib_release();
1024
1025         if ((mpp->action == ACT_CREATE ||
1026              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1027             wait_for_events(mpp, vecs))
1028                         goto fail_map;
1029
1030         /*
1031          * update our state from kernel regardless of create or reload
1032          */
1033         if (setup_multipath(vecs, mpp))
1034                 goto fail; /* if setup_multipath fails, it removes the map */
1035
1036         sync_map_state(mpp);
1037
1038         if (retries >= 0) {
1039                 condlog(2, "%s [%s]: path added to devmap %s",
1040                         pp->dev, pp->dev_t, mpp->alias);
1041                 return 0;
1042         } else
1043                 goto fail;
1044
1045 fail_map:
1046         remove_map(mpp, vecs, 1);
1047 fail:
1048         orphan_path(pp, "failed to add path");
1049         return 1;
1050 }
1051
1052 static int
1053 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1054 {
1055         struct path *pp;
1056         int ret;
1057
1058         condlog(3, "%s: remove path (uevent)", uev->kernel);
1059         delete_foreign(uev->udev);
1060
1061         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1062         lock(&vecs->lock);
1063         pthread_testcancel();
1064         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1065         if (pp)
1066                 ret = ev_remove_path(pp, vecs, need_do_map);
1067         lock_cleanup_pop(vecs->lock);
1068         if (!pp) {
1069                 /* Not an error; path might have been purged earlier */
1070                 condlog(0, "%s: path already removed", uev->kernel);
1071                 return 0;
1072         }
1073         return ret;
1074 }
1075
1076 int
1077 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1078 {
1079         struct multipath * mpp;
1080         int i, retval = 0;
1081         char params[PARAMS_SIZE] = {0};
1082
1083         /*
1084          * avoid referring to the map of an orphaned path
1085          */
1086         if ((mpp = pp->mpp)) {
1087                 /*
1088                  * transform the mp->pg vector of vectors of paths
1089                  * into a mp->params string to feed the device-mapper
1090                  */
1091                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1092                         condlog(0, "%s: failed to update paths",
1093                                 mpp->alias);
1094                         goto fail;
1095                 }
1096
1097                 /*
1098                  * Make sure mpp->hwe doesn't point to freed memory
1099                  * We call extract_hwe_from_path() below to restore mpp->hwe
1100                  */
1101                 if (mpp->hwe == pp->hwe)
1102                         mpp->hwe = NULL;
1103
1104                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1105                         vector_del_slot(mpp->paths, i);
1106
1107                 /*
1108                  * remove the map IF removing the last path
1109                  */
1110                 if (VECTOR_SIZE(mpp->paths) == 0) {
1111                         char alias[WWID_SIZE];
1112
1113                         /*
1114                          * flush_map will fail if the device is open
1115                          */
1116                         strlcpy(alias, mpp->alias, WWID_SIZE);
1117                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1118                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1119                                 mpp->retry_tick = 0;
1120                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1121                                 mpp->disable_queueing = 1;
1122                                 mpp->stat_map_failures++;
1123                                 dm_queue_if_no_path(mpp->alias, 0);
1124                         }
1125                         if (!flush_map(mpp, vecs, 1)) {
1126                                 condlog(2, "%s: removed map after"
1127                                         " removing all paths",
1128                                         alias);
1129                                 retval = 0;
1130                                 goto out;
1131                         }
1132                         /*
1133                          * Not an error, continue
1134                          */
1135                 }
1136
1137                 if (mpp->hwe == NULL)
1138                         extract_hwe_from_path(mpp);
1139
1140                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1141                         condlog(0, "%s: failed to setup map for"
1142                                 " removal of path %s", mpp->alias, pp->dev);
1143                         goto fail;
1144                 }
1145
1146                 if (mpp->wait_for_udev) {
1147                         mpp->wait_for_udev = 2;
1148                         goto out;
1149                 }
1150
1151                 if (!need_do_map)
1152                         goto out;
1153                 /*
1154                  * reload the map
1155                  */
1156                 mpp->action = ACT_RELOAD;
1157                 if (domap(mpp, params, 1) == DOMAP_FAIL) {
1158                         condlog(0, "%s: failed in domap for "
1159                                 "removal of path %s",
1160                                 mpp->alias, pp->dev);
1161                         retval = 1;
1162                 } else {
1163                         /*
1164                          * update our state from kernel
1165                          */
1166                         if (setup_multipath(vecs, mpp))
1167                                 return 1;
1168                         sync_map_state(mpp);
1169
1170                         condlog(2, "%s [%s]: path removed from map %s",
1171                                 pp->dev, pp->dev_t, mpp->alias);
1172                 }
1173         }
1174
1175 out:
1176         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1177                 vector_del_slot(vecs->pathvec, i);
1178
1179         free_path(pp);
1180
1181         return retval;
1182
1183 fail:
1184         remove_map_and_stop_waiter(mpp, vecs);
1185         return 1;
1186 }
1187
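/*
 * Handle a "change" uevent for a path device.  Foreign paths are handed
 * to the foreign library; for native paths the WWID is re-read and, if
 * disable_changed_wwids is set and the WWID differs, the path is failed
 * in its map.  A change in the read-only attribute triggers a map reload,
 * and paths that were still waiting for udev are re-added via
 * uev_add_path().
 */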
1188 static int
1189 uev_update_path (struct uevent *uev, struct vectors * vecs)
1190 {
1191         int ro, retval = 0, rc;
1192         struct path * pp;
1193         struct config *conf;
1194         int disable_changed_wwids;
1195         int needs_reinit = 0;
1196
1197         switch ((rc = change_foreign(uev->udev))) {
1198         case FOREIGN_OK:
1199                 /* known foreign path, ignore event */
1200                 return 0;
1201         case FOREIGN_IGNORED:
1202                 break;
1203         case FOREIGN_ERR:
1204                 condlog(3, "%s: error in change_foreign", __func__);
1205                 break;
1206         default:
1207                 condlog(1, "%s: return code %d of change_foreign is unsupported",
1208                         __func__, rc);
1209                 break;
1210         }
1211
1212         conf = get_multipath_config();
1213         disable_changed_wwids = conf->disable_changed_wwids;
1214         put_multipath_config(conf);
1215
1216         ro = uevent_get_disk_ro(uev);
1217
1218         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1219         lock(&vecs->lock);
1220         pthread_testcancel();
1221
1222         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1223         if (pp) {
1224                 struct multipath *mpp = pp->mpp;
1225                 char wwid[WWID_SIZE];
1226
1227                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1228                         needs_reinit = 1;
1229                         goto out;
1230                 }
1231                 /* Don't deal with other types of failed initialization
1232                  * now. check_path will handle it */
1233                 if (!strlen(pp->wwid))
1234                         goto out;
1235
1236                 strcpy(wwid, pp->wwid);
1237                 rc = get_uid(pp, pp->state, uev->udev);
1238
1239                 if (rc != 0)
1240                         strcpy(pp->wwid, wwid);
1241                 else if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1242                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1243                                 uev->kernel, wwid, pp->wwid,
1244                                 (disable_changed_wwids ? "disallowing" :
1245                                  "continuing"));
1246                         strcpy(pp->wwid, wwid);
1247                         if (disable_changed_wwids) {
1248                                 if (!pp->wwid_changed) {
1249                                         pp->wwid_changed = 1;
1250                                         pp->tick = 1;
1251                                         if (pp->mpp)
1252                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1253                                 }
1254                                 goto out;
1255                         }
1256                 } else {
1257                         pp->wwid_changed = 0;
1258                         udev_device_unref(pp->udev);
1259                         pp->udev = udev_device_ref(uev->udev);
1260                         conf = get_multipath_config();
1261                         pthread_cleanup_push(put_multipath_config, conf);
1262                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1263                                 condlog(1, "%s: pathinfo failed after change uevent",
1264                                         uev->kernel);
1265                         pthread_cleanup_pop(1);
1266                 }
1267
1268                 if (mpp && ro >= 0) {
1269                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1270
1271                         if (mpp->wait_for_udev)
1272                                 mpp->wait_for_udev = 2;
1273                         else {
1274                                 if (ro == 1)
1275                                         pp->mpp->force_readonly = 1;
1276                                 retval = reload_map(vecs, mpp, 0, 1);
1277                                 pp->mpp->force_readonly = 0;
1278                                 condlog(2, "%s: map %s reloaded (retval %d)",
1279                                         uev->kernel, mpp->alias, retval);
1280                         }
1281                 }
1282         }
1283 out:
1284         lock_cleanup_pop(vecs->lock);
1285         if (!pp) {
1286                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1287                 if (uev->udev) {
1288                         int flag = DI_SYSFS | DI_WWID;
1289
1290                         conf = get_multipath_config();
1291                         pthread_cleanup_push(put_multipath_config, conf);
1292                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1293                         pthread_cleanup_pop(1);
1294
1295                         if (retval == PATHINFO_SKIPPED) {
1296                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1297                                 return 0;
1298                         }
1299                 }
1300
1301                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1302         }
1303         if (needs_reinit)
1304                 retval = uev_add_path(uev, vecs, 1);
1305         return retval;
1306 }
1307
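/*
 * Inspect a dm uevent for a PATH_FAILED action and, if it names a path we
 * know about, hand it to the io_err_stat code for path IO error accounting.
 */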
1308 static int
1309 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1310 {
1311         char *action = NULL, *devt = NULL;
1312         struct path *pp;
1313         int r = 1;
1314
1315         action = uevent_get_dm_action(uev);
1316         if (!action)
1317                 return 1;
1318         if (strncmp(action, "PATH_FAILED", 11))
1319                 goto out;
1320         devt = uevent_get_dm_path(uev);
1321         if (!devt) {
1322                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1323                 goto out;
1324         }
1325
1326         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1327         lock(&vecs->lock);
1328         pthread_testcancel();
1329         pp = find_path_by_devt(vecs->pathvec, devt);
1330         if (!pp)
1331                 goto out_lock;
1332         r = io_err_stat_handle_pathfail(pp);
1333         if (r)
1334                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1335                                 pp->dev);
1336 out_lock:
1337         lock_cleanup_pop(vecs->lock);
1338         FREE(devt);
1339         FREE(action);
1340         return r;
1341 out:
1342         FREE(action);
1343         return 1;
1344 }
1345
1346 static int
1347 map_discovery (struct vectors * vecs)
1348 {
1349         struct multipath * mpp;
1350         unsigned int i;
1351
1352         if (dm_get_maps(vecs->mpvec))
1353                 return 1;
1354
1355         vector_foreach_slot (vecs->mpvec, mpp, i)
1356                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1357                     update_multipath_status(mpp)) {
1358                         remove_map(mpp, vecs, 1);
1359                         i--;
1360                 }
1361
1362         return 0;
1363 }
1364
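/*
 * Callback for commands arriving on the unix domain socket.  Non-root
 * clients are restricted to "list"/"show" commands; the reply buffer is
 * filled with "timeout", "fail" or "ok" when parse_cmd() did not already
 * provide one.
 */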
1365 int
1366 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1367                 void * trigger_data)
1368 {
1369         struct vectors * vecs;
1370         int r;
1371
1372         *reply = NULL;
1373         *len = 0;
1374         vecs = (struct vectors *)trigger_data;
1375
1376         if ((str != NULL) && (is_root == false) &&
1377             (strncmp(str, "list", strlen("list")) != 0) &&
1378             (strncmp(str, "show", strlen("show")) != 0)) {
1379                 *reply = STRDUP("permission denied: need to be root");
1380                 if (*reply)
1381                         *len = strlen(*reply) + 1;
1382                 return 1;
1383         }
1384
1385         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1386
1387         if (r > 0) {
1388                 if (r == ETIMEDOUT)
1389                         *reply = STRDUP("timeout\n");
1390                 else
1391                         *reply = STRDUP("fail\n");
1392                 if (*reply)
1393                         *len = strlen(*reply) + 1;
1394                 r = 1;
1395         }
1396         else if (!r && *len == 0) {
1397                 *reply = STRDUP("ok\n");
1398                 if (*reply)
1399                         *len = strlen(*reply) + 1;
1400                 r = 0;
1401         }
1402         /* else if (r < 0) leave *reply alone */
1403
1404         return r;
1405 }
1406
1407 int
1408 uev_trigger (struct uevent * uev, void * trigger_data)
1409 {
1410         int r = 0;
1411         struct vectors * vecs;
1412         struct uevent *merge_uev, *tmp;
1413
1414         vecs = (struct vectors *)trigger_data;
1415
1416         pthread_cleanup_push(config_cleanup, NULL);
1417         pthread_mutex_lock(&config_lock);
1418         if (running_state != DAEMON_IDLE &&
1419             running_state != DAEMON_RUNNING)
1420                 pthread_cond_wait(&config_cond, &config_lock);
1421         pthread_cleanup_pop(1);
1422
1423         if (running_state == DAEMON_SHUTDOWN)
1424                 return 0;
1425
1426         /*
1427          * device map event
1428          * Add events are ignored here as the tables
1429          * are not fully initialised then.
1430          */
1431         if (!strncmp(uev->kernel, "dm-", 3)) {
1432                 if (!uevent_is_mpath(uev)) {
1433                         if (!strncmp(uev->action, "change", 6))
1434                                 (void)add_foreign(uev->udev);
1435                         else if (!strncmp(uev->action, "remove", 6))
1436                                 (void)delete_foreign(uev->udev);
1437                         goto out;
1438                 }
1439                 if (!strncmp(uev->action, "change", 6)) {
1440                         r = uev_add_map(uev, vecs);
1441
1442                         /*
1443                          * the kernel-side dm-mpath issues a PATH_FAILED event
1444                          * when it encounters a path IO error. This is a
1445                          * reasonable entry point for the path IO error
1446                          * accounting process.
1447                          */
1448                         uev_pathfail_check(uev, vecs);
1449                 } else if (!strncmp(uev->action, "remove", 6)) {
1450                         r = uev_remove_map(uev, vecs);
1451                 }
1452                 goto out;
1453         }
1454
1455         /*
1456          * path add/remove/change event, add/remove maybe merged
1457          */
1458         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1459                 if (!strncmp(merge_uev->action, "add", 3))
1460                         r += uev_add_path(merge_uev, vecs, 0);
1461                 if (!strncmp(merge_uev->action, "remove", 6))
1462                         r += uev_remove_path(merge_uev, vecs, 0);
1463         }
1464
1465         if (!strncmp(uev->action, "add", 3))
1466                 r += uev_add_path(uev, vecs, 1);
1467         if (!strncmp(uev->action, "remove", 6))
1468                 r += uev_remove_path(uev, vecs, 1);
1469         if (!strncmp(uev->action, "change", 6))
1470                 r += uev_update_path(uev, vecs);
1471
1472 out:
1473         return r;
1474 }
1475
1476 static void rcu_unregister(void *param)
1477 {
1478         rcu_unregister_thread();
1479 }
1480
1481 static void *
1482 ueventloop (void * ap)
1483 {
1484         struct udev *udev = ap;
1485
1486         pthread_cleanup_push(rcu_unregister, NULL);
1487         rcu_register_thread();
1488         if (uevent_listen(udev))
1489                 condlog(0, "error starting uevent listener");
1490         pthread_cleanup_pop(1);
1491         return NULL;
1492 }
1493
1494 static void *
1495 uevqloop (void * ap)
1496 {
1497         pthread_cleanup_push(rcu_unregister, NULL);
1498         rcu_register_thread();
1499         if (uevent_dispatch(&uev_trigger, ap))
1500                 condlog(0, "error starting uevent dispatcher");
1501         pthread_cleanup_pop(1);
1502         return NULL;
1503 }
1504 static void *
1505 uxlsnrloop (void * ap)
1506 {
1507         long ux_sock;
1508
1509         pthread_cleanup_push(rcu_unregister, NULL);
1510         rcu_register_thread();
1511
1512         ux_sock = ux_socket_listen(DEFAULT_SOCKET);
1513         if (ux_sock == -1) {
1514                 condlog(1, "could not create uxsock: %d", errno);
1515                 exit_daemon();
1516                 goto out;
1517         }
1518         pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);
1519
1520         if (cli_init()) {
1521                 condlog(1, "Failed to init uxsock listener");
1522                 exit_daemon();
1523                 goto out_sock;
1524         }
1525
1526         /* Tell main thread that thread has started */
1527         post_config_state(DAEMON_CONFIGURE);
1528
1529         set_handler_callback(LIST+PATHS, cli_list_paths);
1530         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1531         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1532         set_handler_callback(LIST+PATH, cli_list_path);
1533         set_handler_callback(LIST+MAPS, cli_list_maps);
1534         set_handler_callback(LIST+STATUS, cli_list_status);
1535         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1536         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1537         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1538         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1539         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1540         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1541         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1542         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1543         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1544         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1545         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1546         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1547         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1548         set_handler_callback(LIST+CONFIG, cli_list_config);
1549         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1550         set_handler_callback(LIST+DEVICES, cli_list_devices);
1551         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1552         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1553         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1554         set_handler_callback(ADD+PATH, cli_add_path);
1555         set_handler_callback(DEL+PATH, cli_del_path);
1556         set_handler_callback(ADD+MAP, cli_add_map);
1557         set_handler_callback(DEL+MAP, cli_del_map);
1558         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1559         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1560         set_handler_callback(SUSPEND+MAP, cli_suspend);
1561         set_handler_callback(RESUME+MAP, cli_resume);
1562         set_handler_callback(RESIZE+MAP, cli_resize);
1563         set_handler_callback(RELOAD+MAP, cli_reload);
1564         set_handler_callback(RESET+MAP, cli_reassign);
1565         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1566         set_handler_callback(FAIL+PATH, cli_fail);
1567         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1568         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1569         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1570         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1571         set_unlocked_handler_callback(QUIT, cli_quit);
1572         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1573         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1574         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1575         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1576         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1577         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1578         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1579         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1580         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1581
1582         umask(077);
1583         uxsock_listen(&uxsock_trigger, ux_sock, ap);
1584
1585 out_sock:
1586         pthread_cleanup_pop(1); /* uxsock_cleanup */
1587 out:
1588         pthread_cleanup_pop(1); /* rcu_unregister */
1589         return NULL;
1590 }
1591
1592 void
1593 exit_daemon (void)
1594 {
1595         post_config_state(DAEMON_SHUTDOWN);
1596 }
1597
1598 static void
1599 fail_path (struct path * pp, int del_active)
1600 {
1601         if (!pp->mpp)
1602                 return;
1603
1604         condlog(2, "checker failed path %s in map %s",
1605                  pp->dev_t, pp->mpp->alias);
1606
1607         dm_fail_path(pp->mpp->alias, pp->dev_t);
1608         if (del_active)
1609                 update_queue_mode_del_path(pp->mpp);
1610 }
1611
1612 /*
1613  * caller must have locked the path list before calling this function
1614  */
1615 static int
1616 reinstate_path (struct path * pp, int add_active)
1617 {
1618         int ret = 0;
1619
1620         if (!pp->mpp)
1621                 return 0;
1622
1623         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1624                 condlog(0, "%s: reinstate failed", pp->dev_t);
1625                 ret = 1;
1626         } else {
1627                 condlog(2, "%s: reinstated", pp->dev_t);
1628                 if (add_active)
1629                         update_queue_mode_add_path(pp->mpp);
1630         }
1631         return ret;
1632 }
1633
1634 static void
1635 enable_group(struct path * pp)
1636 {
1637         struct pathgroup * pgp;
1638
1639         /*
1640          * If the path was added through uev_add_path(), pgindex may be unset.
1641          * The next update_strings() call will set it, upon the map reload event.
1642          *
1643          * We can safely return here, because upon map reload all path
1644          * groups will be enabled.
1645          */
1646         if (!pp->mpp->pg || !pp->pgindex)
1647                 return;
1648
1649         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1650
1651         if (pgp->status == PGSTATE_DISABLED) {
1652                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1653                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1654         }
1655 }
1656
1657 static void
1658 mpvec_garbage_collector (struct vectors * vecs)
1659 {
1660         struct multipath * mpp;
1661         unsigned int i;
1662
1663         if (!vecs->mpvec)
1664                 return;
1665
1666         vector_foreach_slot (vecs->mpvec, mpp, i) {
1667                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1668                         condlog(2, "%s: remove dead map", mpp->alias);
1669                         remove_map_and_stop_waiter(mpp, vecs);
1670                         i--;
1671                 }
1672         }
1673 }
1674
1675 /* This is called after a path has started working again. If the multipath
1676  * device for this path uses the followover failback type, and this is the
1677  * best pathgroup, and this is the first path in the pathgroup to come back
1678  * up, then switch to this pathgroup */
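/*
 * A minimal, purely illustrative multipath.conf snippet selecting this
 * behaviour (placement in the defaults section is an assumption; any section
 * that accepts the failback option would do):
 *
 *      defaults {
 *              failback followover
 *      }
 */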
1679 static int
1680 followover_should_failback(struct path * pp)
1681 {
1682         struct pathgroup * pgp;
1683         struct path *pp1;
1684         int i;
1685
1686         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1687             !pp->mpp->pg || !pp->pgindex ||
1688             pp->pgindex != pp->mpp->bestpg)
1689                 return 0;
1690
1691         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1692         vector_foreach_slot(pgp->paths, pp1, i) {
1693                 if (pp1 == pp)
1694                         continue;
1695                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1696                         return 0;
1697         }
1698         return 1;
1699 }
1700
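/*
 * missing_uev_wait_tick(): count down the uev_wait_tick of maps still waiting
 * for their creation uevent. On timeout, re-enable reloads for the map (and
 * reload it if needed); if delayed_reconfig is set and nothing else blocks a
 * reconfigure, trigger the deferred reconfigure now.
 */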
1701 static void
1702 missing_uev_wait_tick(struct vectors *vecs)
1703 {
1704         struct multipath * mpp;
1705         unsigned int i;
1706         int timed_out = 0, delayed_reconfig;
1707         struct config *conf;
1708
1709         vector_foreach_slot (vecs->mpvec, mpp, i) {
1710                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1711                         timed_out = 1;
1712                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1713                         if (mpp->wait_for_udev > 1 &&
1714                             update_map(mpp, vecs, 0)) {
1715                                 /* update_map removed map */
1716                                 i--;
1717                                 continue;
1718                         }
1719                         mpp->wait_for_udev = 0;
1720                 }
1721         }
1722
1723         conf = get_multipath_config();
1724         delayed_reconfig = conf->delayed_reconfig;
1725         put_multipath_config(conf);
1726         if (timed_out && delayed_reconfig &&
1727             !need_to_delay_reconfig(vecs)) {
1728                 condlog(2, "reconfigure (delayed)");
1729                 set_config_state(DAEMON_CONFIGURE);
1730         }
1731 }
1732
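/*
 * ghost_delay_tick(): count down the per-map ghost_delay timer. If it runs
 * out before an active path appears, force a udev reload via update_map() so
 * the map can be set up with ghost (standby) paths only.
 */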
1733 static void
1734 ghost_delay_tick(struct vectors *vecs)
1735 {
1736         struct multipath * mpp;
1737         unsigned int i;
1738
1739         vector_foreach_slot (vecs->mpvec, mpp, i) {
1740                 if (mpp->ghost_delay_tick <= 0)
1741                         continue;
1742                 if (--mpp->ghost_delay_tick <= 0) {
1743                         condlog(0, "%s: timed out waiting for active path",
1744                                 mpp->alias);
1745                         mpp->force_udev_reload = 1;
1746                         if (update_map(mpp, vecs, 0) != 0) {
1747                                 /* update_map removed map */
1748                                 i--;
1749                                 continue;
1750                         }
1751                 }
1752         }
1753 }
1754
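/*
 * defered_failback_tick(): for maps configured with a deferred (positive)
 * failback value, count down failback_tick and switch to the preferred path
 * group once the timer expires, if a switch is still warranted.
 */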
1755 static void
1756 defered_failback_tick (vector mpvec)
1757 {
1758         struct multipath * mpp;
1759         unsigned int i;
1760
1761         vector_foreach_slot (mpvec, mpp, i) {
1762                 /*
1763                  * count down the deferred failback timer
1764                  */
1765                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1766                         mpp->failback_tick--;
1767
1768                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1769                                 switch_pathgroup(mpp);
1770                 }
1771         }
1772 }
1773
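/*
 * retry_count_tick(): count down the no_path_retry timer of maps that are
 * queueing without any active path; once it reaches zero, turn off
 * queue_if_no_path so queued I/O can be failed.
 */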
1774 static void
1775 retry_count_tick(vector mpvec)
1776 {
1777         struct multipath *mpp;
1778         unsigned int i;
1779
1780         vector_foreach_slot (mpvec, mpp, i) {
1781                 if (mpp->retry_tick > 0) {
1782                         mpp->stat_total_queueing_time++;
1783                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1784                         if(--mpp->retry_tick == 0) {
1785                                 mpp->stat_map_failures++;
1786                                 dm_queue_if_no_path(mpp->alias, 0);
1787                                 condlog(2, "%s: Disable queueing", mpp->alias);
1788                         }
1789                 }
1790         }
1791 }
1792
1793 int update_prio(struct path *pp, int refresh_all)
1794 {
1795         int oldpriority;
1796         struct path *pp1;
1797         struct pathgroup * pgp;
1798         int i, j, changed = 0;
1799         struct config *conf;
1800
1801         if (refresh_all) {
1802                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1803                         vector_foreach_slot (pgp->paths, pp1, j) {
1804                                 oldpriority = pp1->priority;
1805                                 conf = get_multipath_config();
1806                                 pthread_cleanup_push(put_multipath_config,
1807                                                      conf);
1808                                 pathinfo(pp1, conf, DI_PRIO);
1809                                 pthread_cleanup_pop(1);
1810                                 if (pp1->priority != oldpriority)
1811                                         changed = 1;
1812                         }
1813                 }
1814                 return changed;
1815         }
1816         oldpriority = pp->priority;
1817         conf = get_multipath_config();
1818         pthread_cleanup_push(put_multipath_config, conf);
1819         if (pp->state != PATH_DOWN)
1820                 pathinfo(pp, conf, DI_PRIO);
1821         pthread_cleanup_pop(1);
1822
1823         if (pp->priority == oldpriority)
1824                 return 0;
1825         return 1;
1826 }
1827
1828 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1829 {
1830         if (reload_map(vecs, mpp, refresh, 1))
1831                 return 1;
1832
1833         dm_lib_release();
1834         if (setup_multipath(vecs, mpp) != 0)
1835                 return 1;
1836         sync_map_state(mpp);
1837
1838         return 0;
1839 }
1840
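/*
 * check_path_reinstate_state(): "shaky path" detection. It only acts when all
 * three of san_path_err_threshold, san_path_err_forget_rate and
 * san_path_err_recovery_time are set for the map. A purely illustrative
 * configuration (hypothetical values, assumed to live in the defaults
 * section) could look like:
 *
 *      defaults {
 *              san_path_err_threshold     10
 *              san_path_err_forget_rate   100
 *              san_path_err_recovery_time 60
 *      }
 *
 * i.e. once a path has failed more than 10 times, delay reinstating it for
 * 60 seconds, and forget one recorded failure per 100 successful checks.
 */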
1841 static int check_path_reinstate_state(struct path * pp) {
1842         struct timespec curr_time;
1843
1844         /*
1845          * This function is only called when the path state changes
1846          * from "bad" to "good". pp->state reflects the *previous* state.
1847  * If this was "bad", we know that a failure must have occurred
1848          * beforehand, and count that.
1849          * Note that we count path state _changes_ this way. If a path
1850          * remains in "bad" state, failure count is not increased.
1851          */
1852
1853         if (!((pp->mpp->san_path_err_threshold > 0) &&
1854                                 (pp->mpp->san_path_err_forget_rate > 0) &&
1855                                 (pp->mpp->san_path_err_recovery_time >0))) {
1856                 return 0;
1857         }
1858
1859         if (pp->disable_reinstate) {
1860                 /* If we don't know how much time has passed, automatically
1861                  * reinstate the path, just to be safe. Also, if there are
1862                  * no other usable paths, reinstate the path
1863                  */
1864                 if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0 ||
1865                                 pp->mpp->nr_active == 0) {
1866                         condlog(2, "%s : reinstating path early", pp->dev);
1867                         goto reinstate_path;
1868                 }
1869                 if ((curr_time.tv_sec - pp->dis_reinstate_time ) > pp->mpp->san_path_err_recovery_time) {
1870                         condlog(2,"%s : reinstate the path after err recovery time", pp->dev);
1871                         goto reinstate_path;
1872                 }
1873                 return 1;
1874         }
1875         /* forget errors on a working path */
1876         if ((pp->state == PATH_UP || pp->state == PATH_GHOST) &&
1877                         pp->path_failures > 0) {
1878                 if (pp->san_path_err_forget_rate > 0){
1879                         pp->san_path_err_forget_rate--;
1880                 } else {
1881                         /* forget one recorded failure for every
1882                          * san_path_err_forget_rate successful path checks
1883                          */
1884                         pp->path_failures--;
1885                         pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1886                 }
1887                 return 0;
1888         }
1889
1890         /* If the path isn't recovering from a failed state, do nothing */
1891         if (pp->state != PATH_DOWN && pp->state != PATH_SHAKY &&
1892                         pp->state != PATH_TIMEOUT)
1893                 return 0;
1894
1895         if (pp->path_failures == 0)
1896                 pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1897
1898         pp->path_failures++;
1899
1900         /* if we don't know the current time, we don't know how long to
1901          * delay the path, so there's no point in checking if we should
1902          */
1903
1904         if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0)
1905                 return 0;
1906         /* when the path failure count has exceeded san_path_err_threshold,
1907          * place the path in the delayed state until san_path_err_recovery_time
1908          * has elapsed, so that the customer can rectify the issue within that
1909          * time. After san_path_err_recovery_time has passed, the path is
1910          * automatically reinstated
1911          */
1912         if (pp->path_failures > pp->mpp->san_path_err_threshold) {
1913                 condlog(2, "%s : hit error threshold. Delaying path reinstatement", pp->dev);
1914                 pp->dis_reinstate_time = curr_time.tv_sec;
1915                 pp->disable_reinstate = 1;
1916
1917                 return 1;
1918         } else {
1919                 return 0;
1920         }
1921
1922 reinstate_path:
1923         pp->path_failures = 0;
1924         pp->disable_reinstate = 0;
1925         pp->san_path_err_forget_rate = 0;
1926         return 0;
1927 }
1928
1929 /*
1930  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1931  * and '0' otherwise
1932  */
1933 int
1934 check_path (struct vectors * vecs, struct path * pp, int ticks)
1935 {
1936         int newstate;
1937         int new_path_up = 0;
1938         int chkr_new_path_up = 0;
1939         int add_active;
1940         int disable_reinstate = 0;
1941         int oldchkrstate = pp->chkrstate;
1942         int retrigger_tries, checkint, max_checkint, verbosity;
1943         struct config *conf;
1944         int ret;
1945
1946         if ((pp->initialized == INIT_OK ||
1947              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1948                 return 0;
1949
1950         if (pp->tick)
1951                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1952         if (pp->tick)
1953                 return 0; /* don't check this path yet */
1954
1955         conf = get_multipath_config();
1956         retrigger_tries = conf->retrigger_tries;
1957         checkint = conf->checkint;
1958         max_checkint = conf->max_checkint;
1959         verbosity = conf->verbosity;
1960         put_multipath_config(conf);
1961
1962         if (pp->checkint == CHECKINT_UNDEF) {
1963                 condlog(0, "%s: BUG: checkint is not set", pp->dev);
1964                 pp->checkint = checkint;
1965         }
1966
1967         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1968                 if (pp->retriggers < retrigger_tries) {
1969                         condlog(2, "%s: triggering change event to reinitialize",
1970                                 pp->dev);
1971                         pp->initialized = INIT_REQUESTED_UDEV;
1972                         pp->retriggers++;
1973                         sysfs_attr_set_value(pp->udev, "uevent", "change",
1974                                              strlen("change"));
1975                         return 0;
1976                 } else {
1977                         condlog(1, "%s: not initialized after %d udev retriggers",
1978                                 pp->dev, retrigger_tries);
1979                         /*
1980                          * Make sure that the "add missing path" code path
1981                          * below may reinstate the path later, if it ever
1982                          * comes up again.
1983                          * The WWID need not be cleared; if it was set, the
1984                          * state wouldn't have been INIT_MISSING_UDEV in the
1985                          * first place.
1986                          */
1987                         pp->initialized = INIT_FAILED;
1988                         return 0;
1989                 }
1990         }
1991
1992         /*
1993          * schedule the next check as soon as possible,
1994          * in case we exit abnormally from here
1995          */
1996         pp->tick = checkint;
1997
1998         newstate = path_offline(pp);
1999         if (newstate == PATH_UP) {
2000                 conf = get_multipath_config();
2001                 pthread_cleanup_push(put_multipath_config, conf);
2002                 newstate = get_state(pp, conf, 1, newstate);
2003                 pthread_cleanup_pop(1);
2004         } else {
2005                 checker_clear_message(&pp->checker);
2006                 condlog(3, "%s: state %s, checker not called",
2007                         pp->dev, checker_state_name(newstate));
2008         }
2009         /*
2010          * Wait for uevent for removed paths;
2011          * some LLDDs like zfcp keep paths unavailable
2012          * without sending uevents.
2013          */
2014         if (newstate == PATH_REMOVED)
2015                 newstate = PATH_DOWN;
2016
2017         if (pp->wwid_changed) {
2018                 condlog(2, "%s: path wwid has changed. Refusing to use",
2019                         pp->dev);
2020                 newstate = PATH_DOWN;
2021         }
2022
2023         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
2024                 condlog(2, "%s: unusable path (%s) - checker failed",
2025                         pp->dev, checker_state_name(newstate));
2026                 LOG_MSG(2, verbosity, pp);
2027                 conf = get_multipath_config();
2028                 pthread_cleanup_push(put_multipath_config, conf);
2029                 pathinfo(pp, conf, 0);
2030                 pthread_cleanup_pop(1);
2031                 return 1;
2032         }
2033         if (!pp->mpp) {
2034                 if (!strlen(pp->wwid) &&
2035                     (pp->initialized == INIT_FAILED ||
2036                      pp->initialized == INIT_NEW) &&
2037                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
2038                         condlog(2, "%s: add missing path", pp->dev);
2039                         conf = get_multipath_config();
2040                         pthread_cleanup_push(put_multipath_config, conf);
2041                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
2042                         pthread_cleanup_pop(1);
2043                         /* INIT_OK implies ret == PATHINFO_OK */
2044                         if (pp->initialized == INIT_OK) {
2045                                 ev_add_path(pp, vecs, 1);
2046                                 pp->tick = 1;
2047                         } else {
2048                                 /*
2049                                  * We failed multiple times to initialize this
2050                                  * path properly. Don't re-check too often.
2051                                  */
2052                                 pp->checkint = max_checkint;
2053                                 if (ret == PATHINFO_SKIPPED)
2054                                         return -1;
2055                         }
2056                 }
2057                 return 0;
2058         }
2059         /*
2060          * Async IO in flight. Keep the previous path state
2061          * and reschedule as soon as possible
2062          */
2063         if (newstate == PATH_PENDING) {
2064                 pp->tick = 1;
2065                 return 0;
2066         }
2067         /*
2068          * Synchronize with kernel state
2069          */
2070         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
2071                 condlog(1, "%s: Could not synchronize with kernel state",
2072                         pp->dev);
2073                 pp->dmstate = PSTATE_UNDEF;
2074         }
2075         /* if update_multipath_strings orphaned the path, quit early */
2076         if (!pp->mpp)
2077                 return 0;
2078         set_no_path_retry(pp->mpp);
2079
2080         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
2081                         check_path_reinstate_state(pp)) {
2082                 pp->state = PATH_DELAYED;
2083                 return 1;
2084         }
2085
2086         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
2087             pp->io_err_disable_reinstate && need_io_err_check(pp)) {
2088                 pp->state = PATH_SHAKY;
2089                 /*
2090                  * reschedule as soon as possible, so that this path can
2091                  * be recovered in time
2092                  */
2093                 pp->tick = 1;
2094                 return 1;
2095         }
2096
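        /*
         * delay_wait_checks handling: a path that failed while under watch is
         * held in PATH_DELAYED for up to wait_checks further checker passes,
         * unless the map has no active path left at all.
         */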
2097         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
2098              pp->wait_checks > 0) {
2099                 if (pp->mpp->nr_active > 0) {
2100                         pp->state = PATH_DELAYED;
2101                         pp->wait_checks--;
2102                         return 1;
2103                 } else
2104                         pp->wait_checks = 0;
2105         }
2106
2107         /*
2108          * don't reinstate a failed path if it's in stand-by
2109          * and the target supports only implicit tpgs mode.
2110          * this will prevent unnecessary i/o by dm on stand-by
2111          * paths if there are no other active paths in the map.
2112          */
2113         disable_reinstate = (newstate == PATH_GHOST &&
2114                             pp->mpp->nr_active == 0 &&
2115                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
2116
2117         pp->chkrstate = newstate;
2118         if (newstate != pp->state) {
2119                 int oldstate = pp->state;
2120                 pp->state = newstate;
2121
2122                 LOG_MSG(1, verbosity, pp);
2123
2124                 /*
2125                  * upon state change, reset the checkint
2126                  * to the shortest delay
2127                  */
2128                 conf = get_multipath_config();
2129                 pp->checkint = conf->checkint;
2130                 put_multipath_config(conf);
2131
2132                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2133                         /*
2134                          * proactively fail path in the DM
2135                          */
2136                         if (oldstate == PATH_UP ||
2137                             oldstate == PATH_GHOST) {
2138                                 fail_path(pp, 1);
2139                                 if (pp->mpp->delay_wait_checks > 0 &&
2140                                     pp->watch_checks > 0) {
2141                                         pp->wait_checks = pp->mpp->delay_wait_checks;
2142                                         pp->watch_checks = 0;
2143                                 }
2144                         } else {
2145                                 fail_path(pp, 0);
2146                                 if (pp->wait_checks > 0)
2147                                         pp->wait_checks =
2148                                                 pp->mpp->delay_wait_checks;
2149                         }
2150
2151                         /*
2152                          * cancel scheduled failback
2153                          */
2154                         pp->mpp->failback_tick = 0;
2155
2156                         pp->mpp->stat_path_failures++;
2157                         return 1;
2158                 }
2159
2160                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2161                         if (pp->mpp->prflag) {
2162                                 /*
2163                                  * Check Persistent Reservation.
2164                                  */
2165                                 condlog(2, "%s: checking persistent "
2166                                         "reservation registration", pp->dev);
2167                                 mpath_pr_event_handle(pp);
2168                         }
2169                 }
2170
2171                 /*
2172                  * reinstate this path
2173                  */
2174                 if (oldstate != PATH_UP &&
2175                     oldstate != PATH_GHOST) {
2176                         if (pp->mpp->delay_watch_checks > 0)
2177                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2178                         add_active = 1;
2179                 } else {
2180                         if (pp->watch_checks > 0)
2181                                 pp->watch_checks--;
2182                         add_active = 0;
2183                 }
2184                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2185                         condlog(3, "%s: reload map", pp->dev);
2186                         ev_add_path(pp, vecs, 1);
2187                         pp->tick = 1;
2188                         return 0;
2189                 }
2190                 new_path_up = 1;
2191
2192                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2193                         chkr_new_path_up = 1;
2194
2195                 /*
2196                  * if at least one path is up in a group, and
2197                  * the group is disabled, re-enable it
2198                  */
2199                 if (newstate == PATH_UP)
2200                         enable_group(pp);
2201         }
2202         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2203                 if ((pp->dmstate == PSTATE_FAILED ||
2204                     pp->dmstate == PSTATE_UNDEF) &&
2205                     !disable_reinstate) {
2206                         /* Clear IO errors */
2207                         if (reinstate_path(pp, 0)) {
2208                                 condlog(3, "%s: reload map", pp->dev);
2209                                 ev_add_path(pp, vecs, 1);
2210                                 pp->tick = 1;
2211                                 return 0;
2212                         }
2213                 } else {
2214                         LOG_MSG(4, verbosity, pp);
2215                         if (pp->checkint != max_checkint) {
2216                                 /*
2217                                  * double the next check delay.
2218                                  * max at conf->max_checkint
2219                                  */
2220                                 if (pp->checkint < (max_checkint / 2))
2221                                         pp->checkint = 2 * pp->checkint;
2222                                 else
2223                                         pp->checkint = max_checkint;
2224
2225                                 condlog(4, "%s: delay next check %is",
2226                                         pp->dev_t, pp->checkint);
2227                         }
2228                         if (pp->watch_checks > 0)
2229                                 pp->watch_checks--;
2230                         pp->tick = pp->checkint;
2231                 }
2232         }
2233         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2234                 if (pp->dmstate == PSTATE_ACTIVE ||
2235                     pp->dmstate == PSTATE_UNDEF)
2236                         fail_path(pp, 0);
2237                 if (newstate == PATH_DOWN) {
2238                         int log_checker_err;
2239
2240                         conf = get_multipath_config();
2241                         log_checker_err = conf->log_checker_err;
2242                         put_multipath_config(conf);
2243                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2244                                 LOG_MSG(3, verbosity, pp);
2245                         else
2246                                 LOG_MSG(2, verbosity, pp);
2247                 }
2248         }
2249
2250         pp->state = newstate;
2251
2252         if (pp->mpp->wait_for_udev)
2253                 return 1;
2254         /*
2255          * path prio refreshing
2256          */
2257         condlog(4, "path prio refresh");
2258
2259         if (update_prio(pp, new_path_up) &&
2260             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2261              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2262                 update_path_groups(pp->mpp, vecs, !new_path_up);
2263         else if (need_switch_pathgroup(pp->mpp, 0)) {
2264                 if (pp->mpp->pgfailback > 0 &&
2265                     (new_path_up || pp->mpp->failback_tick <= 0))
2266                         pp->mpp->failback_tick =
2267                                 pp->mpp->pgfailback + 1;
2268                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2269                          (chkr_new_path_up && followover_should_failback(pp)))
2270                         switch_pathgroup(pp->mpp);
2271         }
2272         return 1;
2273 }
2274
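/*
 * checkerloop(): the path checker thread. Roughly once per second (or per
 * elapsed tick) it takes the vecs lock, runs check_path() on every path,
 * drives the per-map timers (deferred failback, retry counts, uevent waits,
 * ghost delay), and periodically garbage-collects maps that no longer exist
 * in device-mapper.
 */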
2275 static void *
2276 checkerloop (void *ap)
2277 {
2278         struct vectors *vecs;
2279         struct path *pp;
2280         int count = 0;
2281         unsigned int i;
2282         struct timespec last_time;
2283         struct config *conf;
2284         int foreign_tick = 0;
2285
2286         pthread_cleanup_push(rcu_unregister, NULL);
2287         rcu_register_thread();
2288         mlockall(MCL_CURRENT | MCL_FUTURE);
2289         vecs = (struct vectors *)ap;
2290         condlog(2, "path checkers start up");
2291
2292         /* Tweak start time for initial path check */
2293         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2294                 last_time.tv_sec = 0;
2295         else
2296                 last_time.tv_sec -= 1;
2297
2298         while (1) {
2299                 struct timespec diff_time, start_time, end_time;
2300                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2301
2302                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2303                         start_time.tv_sec = 0;
2304                 if (start_time.tv_sec && last_time.tv_sec) {
2305                         timespecsub(&start_time, &last_time, &diff_time);
2306                         condlog(4, "tick (%lu.%06lu secs)",
2307                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2308                         last_time = start_time;
2309                         ticks = diff_time.tv_sec;
2310                 } else {
2311                         ticks = 1;
2312                         condlog(4, "tick (%d ticks)", ticks);
2313                 }
2314 #ifdef USE_SYSTEMD
2315                 if (use_watchdog)
2316                         sd_notify(0, "WATCHDOG=1");
2317 #endif
2318                 rc = set_config_state(DAEMON_RUNNING);
2319                 if (rc == ETIMEDOUT) {
2320                         condlog(4, "timeout waiting for DAEMON_IDLE");
2321                         continue;
2322                 } else if (rc == EINVAL)
2323                         /* daemon shutdown */
2324                         break;
2325
2326                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2327                 lock(&vecs->lock);
2328                 pthread_testcancel();
2329                 vector_foreach_slot (vecs->pathvec, pp, i) {
2330                         rc = check_path(vecs, pp, ticks);
2331                         if (rc < 0) {
2332                                 vector_del_slot(vecs->pathvec, i);
2333                                 free_path(pp);
2334                                 i--;
2335                         } else
2336                                 num_paths += rc;
2337                 }
2338                 lock_cleanup_pop(vecs->lock);
2339
2340                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2341                 lock(&vecs->lock);
2342                 pthread_testcancel();
2343                 defered_failback_tick(vecs->mpvec);
2344                 retry_count_tick(vecs->mpvec);
2345                 missing_uev_wait_tick(vecs);
2346                 ghost_delay_tick(vecs);
2347                 lock_cleanup_pop(vecs->lock);
2348
2349                 if (count)
2350                         count--;
2351                 else {
2352                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2353                         lock(&vecs->lock);
2354                         pthread_testcancel();
2355                         condlog(4, "map garbage collection");
2356                         mpvec_garbage_collector(vecs);
2357                         count = MAPGCINT;
2358                         lock_cleanup_pop(vecs->lock);
2359                 }
2360
2361                 diff_time.tv_nsec = 0;
2362                 if (start_time.tv_sec &&
2363                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2364                         timespecsub(&end_time, &start_time, &diff_time);
2365                         if (num_paths) {
2366                                 unsigned int max_checkint;
2367
2368                                 condlog(4, "checked %d path%s in %lu.%06lu secs",
2369                                         num_paths, num_paths > 1 ? "s" : "",
2370                                         diff_time.tv_sec,
2371                                         diff_time.tv_nsec / 1000);
2372                                 conf = get_multipath_config();
2373                                 max_checkint = conf->max_checkint;
2374                                 put_multipath_config(conf);
2375                                 if (diff_time.tv_sec > max_checkint)
2376                                         condlog(1, "path checkers took longer "
2377                                                 "than %lu seconds, consider "
2378                                                 "increasing max_polling_interval",
2379                                                 diff_time.tv_sec);
2380                         }
2381                 }
2382
2383                 if (foreign_tick == 0) {
2384                         conf = get_multipath_config();
2385                         foreign_tick = conf->max_checkint;
2386                         put_multipath_config(conf);
2387                 }
2388                 if (--foreign_tick == 0)
2389                         check_foreign();
2390
2391                 post_config_state(DAEMON_IDLE);
2392                 conf = get_multipath_config();
2393                 strict_timing = conf->strict_timing;
2394                 put_multipath_config(conf);
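                /*
                 * With strict_timing, sleep only for the remainder of the
                 * current one-second slot (one second minus the time the
                 * checkers took), keeping iterations on a fixed 1 s cadence;
                 * otherwise just sleep for a full second.
                 */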
2395                 if (!strict_timing)
2396                         sleep(1);
2397                 else {
2398                         if (diff_time.tv_nsec) {
2399                                 diff_time.tv_sec = 0;
2400                                 diff_time.tv_nsec =
2401                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2402                         } else
2403                                 diff_time.tv_sec = 1;
2404
2405                         condlog(3, "waiting for %lu.%06lu secs",
2406                                 diff_time.tv_sec,
2407                                 diff_time.tv_nsec / 1000);
2408                         if (nanosleep(&diff_time, NULL) != 0) {
2409                                 condlog(3, "nanosleep failed with error %d",
2410                                         errno);
2411                                 conf = get_multipath_config();
2412                                 conf->strict_timing = 0;
2413                                 put_multipath_config(conf);
2414                                 break;
2415                         }
2416                 }
2417         }
2418         pthread_cleanup_pop(1);
2419         return NULL;
2420 }
2421
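/*
 * configure(): (re)build the full set of multipath maps. Discovers paths from
 * sysfs and existing maps from device-mapper, filters blacklisted paths,
 * coalesces paths into maps and pushes changed maps into the kernel, then
 * starts event waiters for the resulting maps.
 */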
2422 int
2423 configure (struct vectors * vecs)
2424 {
2425         struct multipath * mpp;
2426         struct path * pp;
2427         vector mpvec;
2428         int i, ret;
2429         struct config *conf;
2430         static int force_reload = FORCE_RELOAD_WEAK;
2431
2432         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2433                 condlog(0, "couldn't allocate path vec in configure");
2434                 return 1;
2435         }
2436
2437         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2438                 condlog(0, "couldn't allocate multipath vec in configure");
2439                 return 1;
2440         }
2441
2442         if (!(mpvec = vector_alloc())) {
2443                 condlog(0, "couldn't allocate new maps vec in configure");
2444                 return 1;
2445         }
2446
2447         /*
2448          * probe for current path (from sysfs) and map (from dm) sets
2449          */
2450         ret = path_discovery(vecs->pathvec, DI_ALL);
2451         if (ret < 0) {
2452                 condlog(0, "configure failed at path discovery");
2453                 goto fail;
2454         }
2455
2456         conf = get_multipath_config();
2457         pthread_cleanup_push(put_multipath_config, conf);
2458         vector_foreach_slot (vecs->pathvec, pp, i){
2459                 if (filter_path(conf, pp) > 0){
2460                         vector_del_slot(vecs->pathvec, i);
2461                         free_path(pp);
2462                         i--;
2463                 }
2464         }
2465         pthread_cleanup_pop(1);
2466
2467         if (map_discovery(vecs)) {
2468                 condlog(0, "configure failed at map discovery");
2469                 goto fail;
2470         }
2471
2472         /*
2473          * create new set of maps & push changed ones into dm
2474          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2475          * superfluous ACT_RELOAD ioctls. Later calls are done
2476          * with FORCE_RELOAD_YES.
2477          */
2478         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2479         if (force_reload == FORCE_RELOAD_WEAK)
2480                 force_reload = FORCE_RELOAD_YES;
2481         if (ret != CP_OK) {
2482                 condlog(0, "configure failed while coalescing paths");
2483                 goto fail;
2484         }
2485
2486         /*
2487          * may need to remove some maps which are no longer relevant
2488          * e.g., due to blacklist changes in conf file
2489          */
2490         if (coalesce_maps(vecs, mpvec)) {
2491                 condlog(0, "configure failed while coalescing maps");
2492                 goto fail;
2493         }
2494
2495         dm_lib_release();
2496
2497         sync_maps_state(mpvec);
2498         vector_foreach_slot(mpvec, mpp, i){
2499                 if (remember_wwid(mpp->wwid) == 1)
2500                         trigger_paths_udev_change(mpp, true);
2501                 update_map_pr(mpp);
2502         }
2503
2504         /*
2505          * purge dm of old maps
2506          */
2507         remove_maps(vecs);
2508
2509         /*
2510          * save new set of maps formed by considering current path state
2511          */
2512         vector_free(vecs->mpvec);
2513         vecs->mpvec = mpvec;
2514
2515         /*
2516          * start dm event waiter threads for these new maps
2517          */
2518         vector_foreach_slot(vecs->mpvec, mpp, i) {
2519                 if (wait_for_events(mpp, vecs)) {
2520                         remove_map(mpp, vecs, 1);
2521                         i--;
2522                         continue;
2523                 }
2524                 if (setup_multipath(vecs, mpp))
2525                         i--;
2526         }
2527         return 0;
2528
2529 fail:
2530         vector_free(mpvec);
2531         return 1;
2532 }
2533
2534 int
2535 need_to_delay_reconfig(struct vectors * vecs)
2536 {
2537         struct multipath *mpp;
2538         int i;
2539
2540         if (!VECTOR_SIZE(vecs->mpvec))
2541                 return 0;
2542
2543         vector_foreach_slot(vecs->mpvec, mpp, i) {
2544                 if (mpp->wait_for_udev)
2545                         return 1;
2546         }
2547         return 0;
2548 }
2549
2550 void rcu_free_config(struct rcu_head *head)
2551 {
2552         struct config *conf = container_of(head, struct config, rcu);
2553
2554         free_config(conf);
2555 }
2556
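/*
 * reconfigure(): re-read multipath.conf, tear down all existing maps, paths
 * and foreign state, publish the new configuration via RCU, and rebuild
 * everything with configure().
 */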
2557 int
2558 reconfigure (struct vectors * vecs)
2559 {
2560         struct config * old, *conf;
2561
2562         conf = load_config(DEFAULT_CONFIGFILE);
2563         if (!conf)
2564                 return 1;
2565
2566         /*
2567          * free old map and path vectors ... they use old conf state
2568          */
2569         if (VECTOR_SIZE(vecs->mpvec))
2570                 remove_maps_and_stop_waiters(vecs);
2571
2572         free_pathvec(vecs->pathvec, FREE_PATHS);
2573         vecs->pathvec = NULL;
2574         delete_all_foreign();
2575
2576         /* Re-read any timezone changes */
2577         tzset();
2578
2579         dm_tgt_version(conf->version, TGT_MPATH);
2580         if (verbosity)
2581                 conf->verbosity = verbosity;
2582         if (bindings_read_only)
2583                 conf->bindings_read_only = bindings_read_only;
2584         uxsock_timeout = conf->uxsock_timeout;
2585
2586         old = rcu_dereference(multipath_conf);
2587         rcu_assign_pointer(multipath_conf, conf);
2588         call_rcu(&old->rcu, rcu_free_config);
2589
2590         configure(vecs);
2591
2592
2593         return 0;
2594 }
2595
2596 static struct vectors *
2597 init_vecs (void)
2598 {
2599         struct vectors * vecs;
2600
2601         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2602
2603         if (!vecs)
2604                 return NULL;
2605
2606         pthread_mutex_init(&vecs->lock.mutex, NULL);
2607
2608         return vecs;
2609 }
2610
2611 static void *
2612 signal_set(int signo, void (*func) (int))
2613 {
2614         int r;
2615         struct sigaction sig;
2616         struct sigaction osig;
2617
2618         sig.sa_handler = func;
2619         sigemptyset(&sig.sa_mask);
2620         sig.sa_flags = 0;
2621
2622         r = sigaction(signo, &sig, &osig);
2623
2624         if (r < 0)
2625                 return (SIG_ERR);
2626         else
2627                 return (osig.sa_handler);
2628 }
2629
2630 void
2631 handle_signals(bool nonfatal)
2632 {
2633         if (exit_sig) {
2634                 condlog(2, "exit (signal)");
2635                 exit_sig = 0;
2636                 exit_daemon();
2637         }
2638         if (!nonfatal)
2639                 return;
2640         if (reconfig_sig) {
2641                 condlog(2, "reconfigure (signal)");
2642                 set_config_state(DAEMON_CONFIGURE);
2643         }
2644         if (log_reset_sig) {
2645                 condlog(2, "reset log (signal)");
2646                 if (logsink == 1)
2647                         log_thread_reset();
2648         }
2649         reconfig_sig = 0;
2650         log_reset_sig = 0;
2651 }
2652
2653 static void
2654 sighup (int sig)
2655 {
2656         reconfig_sig = 1;
2657 }
2658
2659 static void
2660 sigend (int sig)
2661 {
2662         exit_sig = 1;
2663 }
2664
2665 static void
2666 sigusr1 (int sig)
2667 {
2668         log_reset_sig = 1;
2669 }
2670
2671 static void
2672 sigusr2 (int sig)
2673 {
2674         condlog(3, "SIGUSR2 received");
2675 }
2676
2677 static void
2678 signal_init(void)
2679 {
2680         sigset_t set;
2681
2682         /* block all signals */
2683         sigfillset(&set);
2684         /* SIGPIPE occurs if logging fails */
2685         sigdelset(&set, SIGPIPE);
2686         pthread_sigmask(SIG_SETMASK, &set, NULL);
2687
2688         /* Other signals will be unblocked in the uxlsnr thread */
2689         signal_set(SIGHUP, sighup);
2690         signal_set(SIGUSR1, sigusr1);
2691         signal_set(SIGUSR2, sigusr2);
2692         signal_set(SIGINT, sigend);
2693         signal_set(SIGTERM, sigend);
2694         signal_set(SIGPIPE, sigend);
2695 }
2696
2697 static void
2698 setscheduler (void)
2699 {
2700         int res;
2701         static struct sched_param sched_param = {
2702                 .sched_priority = 99
2703         };
2704
2705         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2706
2707         if (res == -1)
2708                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2709         return;
2710 }
2711
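/*
 * set_oom_adj(): shield the daemon from the kernel OOM killer by writing the
 * minimum score to /proc/self/oom_score_adj (falling back to the legacy
 * /proc/self/oom_adj), unless systemd already provided OOMScoreAdjust.
 */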
2712 static void
2713 set_oom_adj (void)
2714 {
2715 #ifdef OOM_SCORE_ADJ_MIN
2716         int retry = 1;
2717         char *file = "/proc/self/oom_score_adj";
2718         int score = OOM_SCORE_ADJ_MIN;
2719 #else
2720         int retry = 0;
2721         char *file = "/proc/self/oom_adj";
2722         int score = OOM_ADJUST_MIN;
2723 #endif
2724         FILE *fp;
2725         struct stat st;
2726         char *envp;
2727
2728         envp = getenv("OOMScoreAdjust");
2729         if (envp) {
2730                 condlog(3, "Using systemd provided OOMScoreAdjust");
2731                 return;
2732         }
2733         do {
2734                 if (stat(file, &st) == 0){
2735                         fp = fopen(file, "w");
2736                         if (!fp) {
2737                                 condlog(0, "couldn't fopen %s : %s", file,
2738                                         strerror(errno));
2739                                 return;
2740                         }
2741                         fprintf(fp, "%i", score);
2742                         fclose(fp);
2743                         return;
2744                 }
2745                 if (errno != ENOENT) {
2746                         condlog(0, "couldn't stat %s : %s", file,
2747                                 strerror(errno));
2748                         return;
2749                 }
2750 #ifdef OOM_ADJUST_MIN
2751                 file = "/proc/self/oom_adj";
2752                 score = OOM_ADJUST_MIN;
2753 #else
2754                 retry = 0;
2755 #endif
2756         } while (retry--);
2757         condlog(0, "couldn't adjust oom score");
2758 }
2759
2760 static int
2761 child (void * param)
2762 {
2763         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2764         pthread_attr_t log_attr, misc_attr, uevent_attr;
2765         struct vectors * vecs;
2766         struct multipath * mpp;
2767         int i;
2768 #ifdef USE_SYSTEMD
2769         unsigned long checkint;
2770         int startup_done = 0;
2771 #endif
2772         int rc;
2773         int pid_fd = -1;
2774         struct config *conf;
2775         char *envp;
2776         int queue_without_daemon;
2777
2778         mlockall(MCL_CURRENT | MCL_FUTURE);
2779         signal_init();
2780         rcu_init();
2781
2782         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2783         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2784         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2785         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2786
2787         if (logsink == 1) {
2788                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2789                 log_thread_start(&log_attr);
2790                 pthread_attr_destroy(&log_attr);
2791         }
2792         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2793         if (pid_fd < 0) {
2794                 condlog(1, "failed to create pidfile");
2795                 if (logsink == 1)
2796                         log_thread_stop();
2797                 exit(1);
2798         }
2799
2800         post_config_state(DAEMON_START);
2801
2802         condlog(2, "--------start up--------");
2803         condlog(2, "read " DEFAULT_CONFIGFILE);
2804
2805         conf = load_config(DEFAULT_CONFIGFILE);
2806         if (!conf)
2807                 goto failed;
2808
2809         if (verbosity)
2810                 conf->verbosity = verbosity;
2811         if (bindings_read_only)
2812                 conf->bindings_read_only = bindings_read_only;
2813         uxsock_timeout = conf->uxsock_timeout;
2814         rcu_assign_pointer(multipath_conf, conf);
2815         if (init_checkers(conf->multipath_dir)) {
2816                 condlog(0, "failed to initialize checkers");
2817                 goto failed;
2818         }
2819         if (init_prio(conf->multipath_dir)) {
2820                 condlog(0, "failed to initialize prioritizers");
2821                 goto failed;
2822         }
2823         /* Failing this is non-fatal */
2824
2825         init_foreign(conf->multipath_dir);
2826
2827         if (poll_dmevents)
2828                 poll_dmevents = dmevent_poll_supported();
2829         setlogmask(LOG_UPTO(conf->verbosity + 3));
2830
2831         envp = getenv("LimitNOFILE");
2832
2833         if (envp)
2834                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2835         else
2836                 set_max_fds(conf->max_fds);
2837
2838         vecs = gvecs = init_vecs();
2839         if (!vecs)
2840                 goto failed;
2841
2842         setscheduler();
2843         set_oom_adj();
2844
2845 #ifdef USE_SYSTEMD
2846         envp = getenv("WATCHDOG_USEC");
2847         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2848                 /* Value is in microseconds */
2849                 conf->max_checkint = checkint / 1000000;
2850                 /* Rescale checkint */
2851                 if (conf->checkint > conf->max_checkint)
2852                         conf->checkint = conf->max_checkint;
2853                 else
2854                         conf->checkint = conf->max_checkint / 4;
2855                 condlog(3, "enabling watchdog, interval %d max %d",
2856                         conf->checkint, conf->max_checkint);
2857                 use_watchdog = conf->checkint;
2858         }
2859 #endif
2860         /*
2861          * Startup done; drop the local reference to the configuration
2862          */
2863         conf = NULL;
2864
2865         pthread_cleanup_push(config_cleanup, NULL);
2866         pthread_mutex_lock(&config_lock);
2867
2868         __post_config_state(DAEMON_IDLE);
2869         rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
2870         if (!rc) {
2871                 /* Wait for uxlsnr startup */
2872                 while (running_state == DAEMON_IDLE)
2873                         pthread_cond_wait(&config_cond, &config_lock);
2874         }
2875         pthread_cleanup_pop(1);
2876
2877         if (rc) {
2878                 condlog(0, "failed to create cli listener: %d", rc);
2879                 goto failed;
2880         }
2881         else if (running_state != DAEMON_CONFIGURE) {
2882                 condlog(0, "cli listener failed to start");
2883                 goto failed;
2884         }
2885
2886         if (poll_dmevents) {
2887                 if (init_dmevent_waiter(vecs)) {
2888                         condlog(0, "failed to allocate dmevents waiter info");
2889                         goto failed;
2890                 }
2891                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2892                                          wait_dmevents, NULL))) {
2893                         condlog(0, "failed to create dmevent waiter thread: %d",
2894                                 rc);
2895                         goto failed;
2896                 }
2897         }
2898
2899         /*
2900          * Start uevent listener early to catch events
2901          */
2902         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2903                 condlog(0, "failed to create uevent thread: %d", rc);
2904                 goto failed;
2905         }
2906         pthread_attr_destroy(&uevent_attr);
2907
2908         /*
2909          * start threads
2910          */
2911         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2912                 condlog(0,"failed to create checker loop thread: %d", rc);
2913                 goto failed;
2914         }
2915         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2916                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2917                 goto failed;
2918         }
2919         pthread_attr_destroy(&misc_attr);
2920
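        /*
         * Main state loop: wait for state changes; on DAEMON_CONFIGURE run
         * reconfigure() (or mark it delayed if a map is still waiting for
         * udev), then go back to DAEMON_IDLE; leave the loop on
         * DAEMON_SHUTDOWN.
         */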
2921         while (running_state != DAEMON_SHUTDOWN) {
2922                 pthread_cleanup_push(config_cleanup, NULL);
2923                 pthread_mutex_lock(&config_lock);
2924                 if (running_state != DAEMON_CONFIGURE &&
2925                     running_state != DAEMON_SHUTDOWN) {
2926                         pthread_cond_wait(&config_cond, &config_lock);
2927                 }
2928                 pthread_cleanup_pop(1);
2929                 if (running_state == DAEMON_CONFIGURE) {
2930                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2931                         lock(&vecs->lock);
2932                         pthread_testcancel();
2933                         if (!need_to_delay_reconfig(vecs)) {
2934                                 reconfigure(vecs);
2935                         } else {
2936                                 conf = get_multipath_config();
2937                                 conf->delayed_reconfig = 1;
2938                                 put_multipath_config(conf);
2939                         }
2940                         lock_cleanup_pop(vecs->lock);
2941                         post_config_state(DAEMON_IDLE);
2942 #ifdef USE_SYSTEMD
2943                         if (!startup_done) {
2944                                 sd_notify(0, "READY=1");
2945                                 startup_done = 1;
2946                         }
2947 #endif
2948                 }
2949         }
2950
2951         lock(&vecs->lock);
2952         conf = get_multipath_config();
2953         queue_without_daemon = conf->queue_without_daemon;
2954         put_multipath_config(conf);
2955         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2956                 vector_foreach_slot(vecs->mpvec, mpp, i)
2957                         dm_queue_if_no_path(mpp->alias, 0);
2958         remove_maps_and_stop_waiters(vecs);
2959         unlock(&vecs->lock);
2960
2961         pthread_cancel(check_thr);
2962         pthread_cancel(uevent_thr);
2963         pthread_cancel(uxlsnr_thr);
2964         pthread_cancel(uevq_thr);
2965         if (poll_dmevents)
2966                 pthread_cancel(dmevent_thr);
2967
2968         pthread_join(check_thr, NULL);
2969         pthread_join(uevent_thr, NULL);
2970         pthread_join(uxlsnr_thr, NULL);
2971         pthread_join(uevq_thr, NULL);
2972         if (poll_dmevents)
2973                 pthread_join(dmevent_thr, NULL);
2974
2975         stop_io_err_stat_thread();
2976
2977         lock(&vecs->lock);
2978         free_pathvec(vecs->pathvec, FREE_PATHS);
2979         vecs->pathvec = NULL;
2980         unlock(&vecs->lock);
2981
2982         pthread_mutex_destroy(&vecs->lock.mutex);
2983         FREE(vecs);
2984         vecs = NULL;
2985
2986         cleanup_foreign();
2987         cleanup_checkers();
2988         cleanup_prio();
2989         if (poll_dmevents)
2990                 cleanup_dmevent_waiter();
2991
2992         dm_lib_release();
2993         dm_lib_exit();
2994
2995         /* We're done here */
2996         condlog(3, "unlink pidfile");
2997         unlink(DEFAULT_PIDFILE);
2998
2999         condlog(2, "--------shut down-------");
3000
3001         if (logsink == 1)
3002                 log_thread_stop();
3003
3004         /*
3005          * Freeing config must be done after condlog() and dm_lib_exit(),
3006          * because logging functions like dlog() and dm_write_log()
3007          * reference the config.
3008          */
3009         conf = rcu_dereference(multipath_conf);
3010         rcu_assign_pointer(multipath_conf, NULL);
3011         call_rcu(&conf->rcu, rcu_free_config);
3012         udev_unref(udev);
3013         udev = NULL;
3014         pthread_attr_destroy(&waiter_attr);
3015         pthread_attr_destroy(&io_err_stat_attr);
3016 #ifdef _DEBUG_
3017         dbg_free_final(NULL);
3018 #endif
3019
3020 #ifdef USE_SYSTEMD
3021         sd_notify(0, "ERRNO=0");
3022 #endif
3023         exit(0);
3024
3025 failed:
3026 #ifdef USE_SYSTEMD
3027         sd_notify(0, "ERRNO=1");
3028 #endif
3029         if (pid_fd >= 0)
3030                 close(pid_fd);
3031         exit(1);
3032 }
3033
3034 static int
3035 daemonize(void)
3036 {
3037         int pid;
3038         int dev_null_fd;
3039
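        /*
         * Classic double fork: fork() plus setsid() detaches from the
         * controlling terminal, and the second fork() ensures the daemon
         * can never reacquire one.
         */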
3040         if ((pid = fork()) < 0) {
3041                 fprintf(stderr, "Failed first fork: %s\n", strerror(errno));
3042                 return -1;
3043         }
3044         else if (pid != 0)
3045                 return pid;
3046
3047         setsid();
3048
3049         if ((pid = fork()) < 0)
3050                 fprintf(stderr, "Failed second fork: %s\n", strerror(errno));
3051         else if (pid != 0)
3052                 _exit(0);
3053
3054         if (chdir("/") < 0)
3055                 fprintf(stderr, "cannot chdir to '/', continuing\n");
3056
3057         dev_null_fd = open("/dev/null", O_RDWR);
3058         if (dev_null_fd < 0) {
3059                 fprintf(stderr, "cannot open /dev/null for input & output: %s\n",
3060                         strerror(errno));
3061                 _exit(0);
3062         }
3063
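        /* redirect stdin, stdout and stderr to /dev/null */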
3064         close(STDIN_FILENO);
3065         if (dup(dev_null_fd) < 0) {
3066                 fprintf(stderr, "cannot dup /dev/null to stdin: %s\n",
3067                         strerror(errno));
3068                 _exit(0);
3069         }
3070         close(STDOUT_FILENO);
3071         if (dup(dev_null_fd) < 0) {
3072                 fprintf(stderr, "cannot dup /dev/null to stdout: %s\n",
3073                         strerror(errno));
3074                 _exit(0);
3075         }
3076         close(STDERR_FILENO);
3077         if (dup(dev_null_fd) < 0) {
3078                 fprintf(stderr, "cannot dup /dev/null to stderr: %s\n",
3079                         strerror(errno));
3080                 _exit(0);
3081         }
3082         close(dev_null_fd);
3083         daemon_pid = getpid();
3084         return 0;
3085 }
3086
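/*
 * Typical invocations, for reference:
 *   multipathd                  run as a daemon, logging to syslog
 *   multipathd -d -v 3          stay in the foreground, verbose logging
 *   multipathd -k               interactive client to a running daemon
 *   multipathd show paths       one-shot client command (see below)
 */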
3087 int
3088 main (int argc, char *argv[])
3089 {
3090         extern char *optarg;
3091         extern int optind;
3092         int arg;
3093         int err;
3094         int foreground = 0;
3095         struct config *conf;
3096
3097         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
3098                                    "Manipulated through RCU");
3099         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
3100                 "Suppress complaints about unprotected running_state reads");
3101         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
3102                 "Suppress complaints about this scalar variable");
3103
3104         logsink = 1;
3105
3106         if (getuid() != 0) {
3107                 fprintf(stderr, "need to be root\n");
3108                 exit(1);
3109         }
3110
3111         /* make sure we don't lock any path */
3112         if (chdir("/") < 0)
3113                 fprintf(stderr, "can't chdir to root directory: %s\n",
3114                         strerror(errno));
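        /*
         * umask(077) temporarily sets a restrictive mask and returns the
         * inherited one; re-applying it with 022 ORed in guarantees that
         * created files are never group- or world-writable.
         */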
3115         umask(umask(077) | 022);
3116
3117         pthread_cond_init_mono(&config_cond);
3118
3119         udev = udev_new();
3120         libmp_udev_set_sync_support(0);
3121
3122         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
3123                 switch(arg) {
3124                 case 'd':
3125                         foreground = 1;
3126                         if (logsink > 0)
3127                                 logsink = 0;
3128                         //debug=1; /* ### comment me out ### */
3129                         break;
3130                 case 'v':
3131                         /* require a numeric verbosity argument */
3132                         if (!isdigit(optarg[0]))
3133                                 exit(1);
3134
3135                         verbosity = atoi(optarg);
3136                         break;
3137                 case 's':
3138                         logsink = -1;
3139                         break;
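                /*
                 * -k: act as an interactive client, sending commands to a
                 * running multipathd over its unix domain socket.
                 */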
3140                 case 'k':
3141                         logsink = 0;
3142                         conf = load_config(DEFAULT_CONFIGFILE);
3143                         if (!conf)
3144                                 exit(1);
3145                         if (verbosity)
3146                                 conf->verbosity = verbosity;
3147                         uxsock_timeout = conf->uxsock_timeout;
3148                         err = uxclnt(optarg, uxsock_timeout + 100);
3149                         free_config(conf);
3150                         return err;
3151                 case 'B':
3152                         bindings_read_only = 1;
3153                         break;
3154                 case 'n':
3155                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3156                         break;
3157                 case 'w':
3158                         poll_dmevents = 0;
3159                         break;
3160                 default:
3161                         fprintf(stderr, "Invalid argument '-%c'\n",
3162                                 optopt);
3163                         exit(1);
3164                 }
3165         }
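        /*
         * Remaining command line words are joined into a single command
         * string and sent to the running daemon, so e.g.
         * "multipathd show paths" works like the interactive -k mode.
         */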
3166         if (optind < argc) {
3167                 char cmd[CMDSIZE];
3168                 char * s = cmd;
3169                 char * c = s;
3170
3171                 logsink = 0;
3172                 conf = load_config(DEFAULT_CONFIGFILE);
3173                 if (!conf)
3174                         exit(1);
3175                 if (verbosity)
3176                         conf->verbosity = verbosity;
3177                 uxsock_timeout = conf->uxsock_timeout;
3178                 memset(cmd, 0x0, CMDSIZE);
3179                 while (optind < argc) {
3180                         if (strchr(argv[optind], ' '))
3181                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3182                         else
3183                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
                        /*
                         * snprintf() returns the untruncated length; clamp c
                         * so the remaining-size argument never goes negative
                         * when the command exceeds CMDSIZE.
                         */
                        if (c > s + CMDSIZE - 1)
                                c = s + CMDSIZE - 1;
3184                         optind++;
3185                 }
3186                 c += snprintf(c, s + CMDSIZE - c, "\n");
3187                 err = uxclnt(s, uxsock_timeout + 100);
3188                 free_config(conf);
3189                 return err;
3190         }
3191
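        /*
         * In foreground mode stay attached to the terminal and disable
         * stdout buffering when output is redirected, so log lines appear
         * immediately; otherwise detach via daemonize().
         */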
3192         if (foreground) {
3193                 if (!isatty(fileno(stdout)))
3194                         setbuf(stdout, NULL);
3195                 err = 0;
3196                 daemon_pid = getpid();
3197         } else
3198                 err = daemonize();
3199
3200         if (err < 0)
3201                 /* error */
3202                 exit(1);
3203         else if (err > 0)
3204                 /* parent dies */
3205                 exit(0);
3206         else
3207                 /* child lives */
3208                 return (child(NULL));