multipathd: register threads that use rcu calls
multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <urcu.h>
21 #ifdef USE_SYSTEMD
22 #include <systemd/sd-daemon.h>
23 #endif
24 #include <semaphore.h>
25 #include <time.h>
26 #include <stdbool.h>
27
28 /*
29  * libmultipath
30  */
31 #include "time-util.h"
32
33 /*
34  * libcheckers
35  */
36 #include "checkers.h"
37
38 #ifdef USE_SYSTEMD
39 static int use_watchdog;
40 #endif
41
42 /*
43  * libmultipath
44  */
45 #include "parser.h"
46 #include "vector.h"
47 #include "memory.h"
48 #include "config.h"
49 #include "util.h"
50 #include "hwtable.h"
51 #include "defaults.h"
52 #include "structs.h"
53 #include "blacklist.h"
54 #include "structs_vec.h"
55 #include "dmparser.h"
56 #include "devmapper.h"
57 #include "sysfs.h"
58 #include "dict.h"
59 #include "discovery.h"
60 #include "debug.h"
61 #include "propsel.h"
62 #include "uevent.h"
63 #include "switchgroup.h"
64 #include "print.h"
65 #include "configure.h"
66 #include "prio.h"
67 #include "wwids.h"
68 #include "pgpolicies.h"
69 #include "uevent.h"
70 #include "log.h"
71
72 #include "mpath_cmd.h"
73 #include "mpath_persist.h"
74
75 #include "prioritizers/alua_rtpg.h"
76
77 #include "main.h"
78 #include "pidfile.h"
79 #include "uxlsnr.h"
80 #include "uxclnt.h"
81 #include "cli.h"
82 #include "cli_handlers.h"
83 #include "lock.h"
84 #include "waiter.h"
85 #include "dmevents.h"
86 #include "io_err_stat.h"
87 #include "wwids.h"
88 #include "foreign.h"
89 #include "../third-party/valgrind/drd.h"
90
91 #define FILE_NAME_SIZE 256
92 #define CMDSIZE 160
93
94 #define LOG_MSG(a, b) \
95 do { \
96         if (pp->offline) \
97                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
98         else if (strlen(b)) \
99                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
100 } while(0)
101
102 struct mpath_event_param
103 {
104         char * devname;
105         struct multipath *mpp;
106 };
107
108 int logsink;
109 int uxsock_timeout;
110 int verbosity;
111 int bindings_read_only;
112 int ignore_new_devs;
113 #ifdef NO_DMEVENTS_POLL
114 int poll_dmevents = 0;
115 #else
116 int poll_dmevents = 1;
117 #endif
118 enum daemon_status running_state = DAEMON_INIT;
119 pid_t daemon_pid;
120 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
121 pthread_cond_t config_cond;
122
123 /*
124  * global copy of vecs for use in sig handlers
125  */
126 struct vectors * gvecs;
127
128 struct udev * udev;
129
130 struct config *multipath_conf;
131
132 /* Local variables */
133 static volatile sig_atomic_t exit_sig;
134 static volatile sig_atomic_t reconfig_sig;
135 static volatile sig_atomic_t log_reset_sig;
136
137 const char *
138 daemon_status(void)
139 {
140         switch (running_state) {
141         case DAEMON_INIT:
142                 return "init";
143         case DAEMON_START:
144                 return "startup";
145         case DAEMON_CONFIGURE:
146                 return "configure";
147         case DAEMON_IDLE:
148                 return "idle";
149         case DAEMON_RUNNING:
150                 return "running";
151         case DAEMON_SHUTDOWN:
152                 return "shutdown";
153         }
154         return NULL;
155 }
156
157 /*
158  * I love you too, systemd ...
159  */
160 const char *
161 sd_notify_status(void)
162 {
163         switch (running_state) {
164         case DAEMON_INIT:
165                 return "STATUS=init";
166         case DAEMON_START:
167                 return "STATUS=startup";
168         case DAEMON_CONFIGURE:
169                 return "STATUS=configure";
170         case DAEMON_IDLE:
171         case DAEMON_RUNNING:
172                 return "STATUS=up";
173         case DAEMON_SHUTDOWN:
174                 return "STATUS=shutdown";
175         }
176         return NULL;
177 }
178
179 #ifdef USE_SYSTEMD
180 static void do_sd_notify(enum daemon_status old_state)
181 {
182         /*
183          * Checkerloop switches back and forth between idle and running state.
184          * No need to tell systemd each time.
185          * These notifications cause a lot of overhead on dbus.
186          */
187         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
188             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
189                 return;
190         sd_notify(0, sd_notify_status());
191 }
192 #endif
193
194 static void config_cleanup(void *arg)
195 {
196         pthread_mutex_unlock(&config_lock);
197 }
198
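/*
 * Daemon state machine helpers: post_config_state() unconditionally
 * switches running_state and wakes every waiter on config_cond, while
 * set_config_state() (used by worker threads) first waits up to one
 * second on config_cond if the daemon is not idle, and only changes
 * state if that wait did not time out.
 */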
199 void post_config_state(enum daemon_status state)
200 {
201         pthread_mutex_lock(&config_lock);
202         if (state != running_state) {
203                 enum daemon_status old_state = running_state;
204
205                 running_state = state;
206                 pthread_cond_broadcast(&config_cond);
207 #ifdef USE_SYSTEMD
208                 do_sd_notify(old_state);
209 #endif
210         }
211         pthread_mutex_unlock(&config_lock);
212 }
213
214 int set_config_state(enum daemon_status state)
215 {
216         int rc = 0;
217
218         pthread_cleanup_push(config_cleanup, NULL);
219         pthread_mutex_lock(&config_lock);
220         if (running_state != state) {
221                 enum daemon_status old_state = running_state;
222
223                 if (running_state != DAEMON_IDLE) {
224                         struct timespec ts;
225
226                         clock_gettime(CLOCK_MONOTONIC, &ts);
227                         ts.tv_sec += 1;
228                         rc = pthread_cond_timedwait(&config_cond,
229                                                     &config_lock, &ts);
230                 }
231                 if (!rc) {
232                         running_state = state;
233                         pthread_cond_broadcast(&config_cond);
234 #ifdef USE_SYSTEMD
235                         do_sd_notify(old_state);
236 #endif
237                 }
238         }
239         pthread_cleanup_pop(1);
240         return rc;
241 }
242
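/*
 * Configuration accessors.  get_multipath_config() enters an RCU
 * read-side critical section and returns the current struct config;
 * every call must be paired with put_multipath_config(), which leaves
 * the critical section again:
 *
 *     conf = get_multipath_config();
 *     ... read conf ...
 *     put_multipath_config(conf);
 *
 * Threads using these helpers must be registered with liburcu
 * (rcu_register_thread()), as the daemon threads below do.
 */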
243 struct config *get_multipath_config(void)
244 {
245         rcu_read_lock();
246         return rcu_dereference(multipath_conf);
247 }
248
249 void put_multipath_config(struct config *conf)
250 {
251         rcu_read_unlock();
252 }
253
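/*
 * Decide whether a path group switch is due: optionally refresh the
 * path priorities, recompute the best path group and report whether it
 * differs from the currently scheduled one.  Maps configured for manual
 * failback never request a switch here.
 */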
254 static int
255 need_switch_pathgroup (struct multipath * mpp, int refresh)
256 {
257         struct pathgroup * pgp;
258         struct path * pp;
259         unsigned int i, j;
260         struct config *conf;
261         int bestpg;
262
263         if (!mpp)
264                 return 0;
265
266         /*
267          * Refresh path priority values
268          */
269         if (refresh) {
270                 vector_foreach_slot (mpp->pg, pgp, i) {
271                         vector_foreach_slot (pgp->paths, pp, j) {
272                                 conf = get_multipath_config();
273                                 pathinfo(pp, conf, DI_PRIO);
274                                 put_multipath_config(conf);
275                         }
276                 }
277         }
278
279         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
280                 return 0;
281
282         bestpg = select_path_group(mpp);
283         if (mpp->pgfailback == -FAILBACK_MANUAL)
284                 return 0;
285
286         mpp->bestpg = bestpg;
287         if (mpp->bestpg != mpp->nextpg)
288                 return 1;
289
290         return 0;
291 }
292
293 static void
294 switch_pathgroup (struct multipath * mpp)
295 {
296         mpp->stat_switchgroup++;
297         dm_switchgroup(mpp->alias, mpp->bestpg);
298         condlog(2, "%s: switch to path group #%i",
299                  mpp->alias, mpp->bestpg);
300 }
301
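/*
 * Arrange to be notified of device-mapper events for this map: either
 * register it with the dmevents polling code or start a dedicated
 * per-map waiter thread, depending on poll_dmevents.
 */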
302 static int
303 wait_for_events(struct multipath *mpp, struct vectors *vecs)
304 {
305         if (poll_dmevents)
306                 return watch_dmevents(mpp->alias);
307         else
308                 return start_waiter_thread(mpp, vecs);
309 }
310
311 static void
312 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs,
313                            int purge_vec)
314 {
315         /* devices are automatically removed by the dmevent polling code,
316          * so they don't need to be manually removed here */
317         if (!poll_dmevents)
318                 stop_waiter_thread(mpp, vecs);
319         remove_map(mpp, vecs, purge_vec);
320 }
321
322 static void
323 remove_maps_and_stop_waiters(struct vectors *vecs)
324 {
325         int i;
326         struct multipath * mpp;
327
328         if (!vecs)
329                 return;
330
331         if (!poll_dmevents) {
332                 vector_foreach_slot(vecs->mpvec, mpp, i)
333                         stop_waiter_thread(mpp, vecs);
334         }
335         else
336                 unwatch_all_dmevents();
337
338         remove_maps(vecs);
339 }
340
341 static void
342 set_multipath_wwid (struct multipath * mpp)
343 {
344         if (strlen(mpp->wwid))
345                 return;
346
347         dm_get_uuid(mpp->alias, mpp->wwid);
348 }
349
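/*
 * Apply the map's no_path_retry policy to the kernel: enable or disable
 * queue_if_no_path as configured, and for the "retry N" case enter
 * recovery mode once no active paths remain while the map is still
 * queueing.
 */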
350 static void set_no_path_retry(struct multipath *mpp)
351 {
352         char is_queueing = 0;
353
354         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
355         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
356                 is_queueing = 1;
357
358         switch (mpp->no_path_retry) {
359         case NO_PATH_RETRY_UNDEF:
360                 break;
361         case NO_PATH_RETRY_FAIL:
362                 if (is_queueing)
363                         dm_queue_if_no_path(mpp->alias, 0);
364                 break;
365         case NO_PATH_RETRY_QUEUE:
366                 if (!is_queueing)
367                         dm_queue_if_no_path(mpp->alias, 1);
368                 break;
369         default:
370                 if (mpp->nr_active > 0) {
371                         mpp->retry_tick = 0;
372                         dm_queue_if_no_path(mpp->alias, 1);
373                 } else if (is_queueing && mpp->retry_tick == 0)
374                         enter_recovery_mode(mpp);
375                 break;
376         }
377 }
378
379 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
380                       int reset)
381 {
382         if (dm_get_info(mpp->alias, &mpp->dmi)) {
383                 /* Error accessing table */
384                 condlog(3, "%s: cannot access table", mpp->alias);
385                 goto out;
386         }
387
388         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
389                 condlog(0, "%s: failed to setup multipath", mpp->alias);
390                 goto out;
391         }
392
393         if (reset) {
394                 set_no_path_retry(mpp);
395                 if (VECTOR_SIZE(mpp->paths) != 0)
396                         dm_cancel_deferred_remove(mpp);
397         }
398
399         return 0;
400 out:
401         remove_map_and_stop_waiter(mpp, vecs, PURGE_VEC);
402         return 1;
403 }
404
405 int update_multipath (struct vectors *vecs, char *mapname, int reset)
406 {
407         struct multipath *mpp;
408         struct pathgroup  *pgp;
409         struct path *pp;
410         int i, j;
411
412         mpp = find_mp_by_alias(vecs->mpvec, mapname);
413
414         if (!mpp) {
415                 condlog(3, "%s: multipath map not found", mapname);
416                 return 2;
417         }
418
419         if (__setup_multipath(vecs, mpp, reset))
420                 return 1; /* mpp freed in setup_multipath */
421
422         /*
423          * compare checkers states with DM states
424          */
425         vector_foreach_slot (mpp->pg, pgp, i) {
426                 vector_foreach_slot (pgp->paths, pp, j) {
427                         if (pp->dmstate != PSTATE_FAILED)
428                                 continue;
429
430                         if (pp->state != PATH_DOWN) {
431                                 struct config *conf = get_multipath_config();
432                                 int oldstate = pp->state;
433                                 condlog(2, "%s: mark as failed", pp->dev);
434                                 mpp->stat_path_failures++;
435                                 pp->state = PATH_DOWN;
436                                 if (oldstate == PATH_UP ||
437                                     oldstate == PATH_GHOST)
438                                         update_queue_mode_del_path(mpp);
439
440                                 /*
441                                  * if opportune,
442                                  * schedule the next check earlier
443                                  */
444                                 if (pp->tick > conf->checkint)
445                                         pp->tick = conf->checkint;
446                                 put_multipath_config(conf);
447                         }
448                 }
449         }
450         return 0;
451 }
452
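/*
 * Re-adopt the map's paths and reload its device-mapper table, retrying
 * the reload up to three times.  For a newly created map, failure to
 * set it up or to start event waiting removes the map again.
 */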
453 static int
454 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
455 {
456         int retries = 3;
457         char params[PARAMS_SIZE] = {0};
458
459 retry:
460         condlog(4, "%s: updating new map", mpp->alias);
461         if (adopt_paths(vecs->pathvec, mpp)) {
462                 condlog(0, "%s: failed to adopt paths for new map update",
463                         mpp->alias);
464                 retries = -1;
465                 goto fail;
466         }
467         verify_paths(mpp, vecs);
468         mpp->action = ACT_RELOAD;
469
470         extract_hwe_from_path(mpp);
471         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
472                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
473                 retries = -1;
474                 goto fail;
475         }
476         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
477                 condlog(0, "%s: map_update sleep", mpp->alias);
478                 sleep(1);
479                 goto retry;
480         }
481         dm_lib_release();
482
483 fail:
484         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
485                 condlog(0, "%s: failed to create new map", mpp->alias);
486                 remove_map(mpp, vecs, 1);
487                 return 1;
488         }
489
490         if (setup_multipath(vecs, mpp))
491                 return 1;
492
493         sync_map_state(mpp);
494
495         if (retries < 0)
496                 condlog(0, "%s: failed reload in new map update", mpp->alias);
497         return 0;
498 }
499
500 static struct multipath *
501 add_map_without_path (struct vectors *vecs, const char *alias)
502 {
503         struct multipath * mpp = alloc_multipath();
504         struct config *conf;
505
506         if (!mpp)
507                 return NULL;
508         if (!alias) {
509                 FREE(mpp);
510                 return NULL;
511         }
512
513         mpp->alias = STRDUP(alias);
514
515         if (dm_get_info(mpp->alias, &mpp->dmi)) {
516                 condlog(3, "%s: cannot access table", mpp->alias);
517                 goto out;
518         }
519         set_multipath_wwid(mpp);
520         conf = get_multipath_config();
521         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
522         put_multipath_config(conf);
523
524         if (update_multipath_table(mpp, vecs->pathvec, 1))
525                 goto out;
526         if (update_multipath_status(mpp))
527                 goto out;
528
529         if (!vector_alloc_slot(vecs->mpvec))
530                 goto out;
531
532         vector_set_slot(vecs->mpvec, mpp);
533
534         if (update_map(mpp, vecs, 1) != 0) /* map removed */
535                 return NULL;
536
537         return mpp;
538 out:
539         remove_map(mpp, vecs, PURGE_VEC);
540         return NULL;
541 }
542
543 static int
544 coalesce_maps(struct vectors *vecs, vector nmpv)
545 {
546         struct multipath * ompp;
547         vector ompv = vecs->mpvec;
548         unsigned int i, reassign_maps;
549         struct config *conf;
550
551         conf = get_multipath_config();
552         reassign_maps = conf->reassign_maps;
553         put_multipath_config(conf);
554         vector_foreach_slot (ompv, ompp, i) {
555                 condlog(3, "%s: coalesce map", ompp->alias);
556                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
557                         /*
558                          * remove all current maps not allowed by the
559                          * current configuration
560                          */
561                         if (dm_flush_map(ompp->alias)) {
562                                 condlog(0, "%s: unable to flush devmap",
563                                         ompp->alias);
564                                 /*
565                                  * may be just because the device is open
566                                  */
567                                 if (setup_multipath(vecs, ompp) != 0) {
568                                         i--;
569                                         continue;
570                                 }
571                                 if (!vector_alloc_slot(nmpv))
572                                         return 1;
573
574                                 vector_set_slot(nmpv, ompp);
575
576                                 vector_del_slot(ompv, i);
577                                 i--;
578                         }
579                         else {
580                                 dm_lib_release();
581                                 condlog(2, "%s devmap removed", ompp->alias);
582                         }
583                 } else if (reassign_maps) {
584                         condlog(3, "%s: Reassign existing device-mapper"
585                                 " devices", ompp->alias);
586                         dm_reassign(ompp->alias);
587                 }
588         }
589         return 0;
590 }
591
592 static void
593 sync_maps_state(vector mpvec)
594 {
595         unsigned int i;
596         struct multipath *mpp;
597
598         vector_foreach_slot (mpvec, mpp, i)
599                 sync_map_state(mpp);
600 }
601
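/*
 * Flush the map from device-mapper (using dm_flush_map_nopaths() when
 * called with nopaths set); on success orphan its paths, stop its event
 * waiter and drop it from the daemon's map vector, otherwise report the
 * failure or note a deferred remove in progress.
 */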
602 static int
603 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
604 {
605         int r;
606
607         if (nopaths)
608                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
609         else
610                 r = dm_flush_map(mpp->alias);
611         /*
612          * clear references to this map after flushing so we can ignore
613          * the spurious uevent we may have generated with the dm_flush_map call above
614          */
615         if (r) {
616                 /*
617                  * May not really be an error -- if the map was already flushed
618                  * from the device mapper by dmsetup(8) for instance.
619                  */
620                 if (r == 1)
621                         condlog(0, "%s: can't flush", mpp->alias);
622                 else {
623                         condlog(2, "%s: devmap deferred remove", mpp->alias);
624                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
625                 }
626                 return r;
627         }
628         else {
629                 dm_lib_release();
630                 condlog(2, "%s: map flushed", mpp->alias);
631         }
632
633         orphan_paths(vecs->pathvec, mpp);
634         remove_map_and_stop_waiter(mpp, vecs, 1);
635
636         return 0;
637 }
638
639 static int
640 uev_add_map (struct uevent * uev, struct vectors * vecs)
641 {
642         char *alias;
643         int major = -1, minor = -1, rc;
644
645         condlog(3, "%s: add map (uevent)", uev->kernel);
646         alias = uevent_get_dm_name(uev);
647         if (!alias) {
648                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
649                 major = uevent_get_major(uev);
650                 minor = uevent_get_minor(uev);
651                 alias = dm_mapname(major, minor);
652                 if (!alias) {
653                         condlog(2, "%s: mapname not found for %d:%d",
654                                 uev->kernel, major, minor);
655                         return 1;
656                 }
657         }
658         pthread_cleanup_push(cleanup_lock, &vecs->lock);
659         lock(&vecs->lock);
660         pthread_testcancel();
661         rc = ev_add_map(uev->kernel, alias, vecs);
662         lock_cleanup_pop(vecs->lock);
663         FREE(alias);
664         return rc;
665 }
666
667 /*
668  * ev_add_map expects that the multipath device already exists in kernel
669  * before it is called. It just adds a device to multipathd or updates an
670  * existing device.
671  */
672 int
673 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
674 {
675         struct multipath * mpp;
676         int delayed_reconfig, reassign_maps;
677         struct config *conf;
678
679         if (!dm_is_mpath(alias)) {
680                 condlog(4, "%s: not a multipath map", alias);
681                 return 0;
682         }
683
684         mpp = find_mp_by_alias(vecs->mpvec, alias);
685
686         if (mpp) {
687                 if (mpp->wait_for_udev > 1) {
688                         condlog(2, "%s: performing delayed actions",
689                                 mpp->alias);
690                         if (update_map(mpp, vecs, 0))
691                                 /* setup_multipath removed the map */
692                                 return 1;
693                 }
694                 conf = get_multipath_config();
695                 delayed_reconfig = conf->delayed_reconfig;
696                 reassign_maps = conf->reassign_maps;
697                 put_multipath_config(conf);
698                 if (mpp->wait_for_udev) {
699                         mpp->wait_for_udev = 0;
700                         if (delayed_reconfig &&
701                             !need_to_delay_reconfig(vecs)) {
702                                 condlog(2, "reconfigure (delayed)");
703                                 set_config_state(DAEMON_CONFIGURE);
704                                 return 0;
705                         }
706                 }
707                 /*
708                  * Not really an error -- we generate our own uevent
709                  * if we create a multipath mapped device as a result
710                  * of uev_add_path
711                  */
712                 if (reassign_maps) {
713                         condlog(3, "%s: Reassign existing device-mapper devices",
714                                 alias);
715                         dm_reassign(alias);
716                 }
717                 return 0;
718         }
719         condlog(2, "%s: adding map", alias);
720
721         /*
722          * now we can register the map
723          */
724         if ((mpp = add_map_without_path(vecs, alias))) {
725                 sync_map_state(mpp);
726                 condlog(2, "%s: devmap %s registered", alias, dev);
727                 return 0;
728         } else {
729                 condlog(2, "%s: ev_add_map failed", dev);
730                 return 1;
731         }
732 }
733
734 static int
735 uev_remove_map (struct uevent * uev, struct vectors * vecs)
736 {
737         char *alias;
738         int minor;
739         struct multipath *mpp;
740
741         condlog(2, "%s: remove map (uevent)", uev->kernel);
742         alias = uevent_get_dm_name(uev);
743         if (!alias) {
744                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
745                 return 0;
746         }
747         minor = uevent_get_minor(uev);
748
749         pthread_cleanup_push(cleanup_lock, &vecs->lock);
750         lock(&vecs->lock);
751         pthread_testcancel();
752         mpp = find_mp_by_minor(vecs->mpvec, minor);
753
754         if (!mpp) {
755                 condlog(2, "%s: devmap not registered, can't remove",
756                         uev->kernel);
757                 goto out;
758         }
759         if (strcmp(mpp->alias, alias)) {
760                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
761                         mpp->alias, mpp->dmi->minor, minor);
762                 goto out;
763         }
764
765         orphan_paths(vecs->pathvec, mpp);
766         remove_map_and_stop_waiter(mpp, vecs, 1);
767 out:
768         lock_cleanup_pop(vecs->lock);
769         FREE(alias);
770         return 0;
771 }
772
773 /* Called from CLI handler */
774 int
775 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
776 {
777         struct multipath * mpp;
778
779         mpp = find_mp_by_minor(vecs->mpvec, minor);
780
781         if (!mpp) {
782                 condlog(2, "%s: devmap not registered, can't remove",
783                         devname);
784                 return 1;
785         }
786         if (strcmp(mpp->alias, alias)) {
787                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
788                         mpp->alias, mpp->dmi->minor, minor);
789                 return 1;
790         }
791         return flush_map(mpp, vecs, 0);
792 }
793
794 static int
795 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
796 {
797         struct path *pp;
798         int ret = 0, i;
799         struct config *conf;
800
801         condlog(2, "%s: add path (uevent)", uev->kernel);
802         if (strstr(uev->kernel, "..") != NULL) {
803                 /*
804                  * Don't allow relative device names in the pathvec
805                  */
806                 condlog(0, "%s: path name is invalid", uev->kernel);
807                 return 1;
808         }
809
810         pthread_cleanup_push(cleanup_lock, &vecs->lock);
811         lock(&vecs->lock);
812         pthread_testcancel();
813         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
814         if (pp) {
815                 int r;
816
817                 condlog(2, "%s: spurious uevent, path already in pathvec",
818                         uev->kernel);
819                 if (!pp->mpp && !strlen(pp->wwid)) {
820                         condlog(3, "%s: reinitialize path", uev->kernel);
821                         udev_device_unref(pp->udev);
822                         pp->udev = udev_device_ref(uev->udev);
823                         conf = get_multipath_config();
824                         r = pathinfo(pp, conf,
825                                      DI_ALL | DI_BLACKLIST);
826                         put_multipath_config(conf);
827                         if (r == PATHINFO_OK)
828                                 ret = ev_add_path(pp, vecs, need_do_map);
829                         else if (r == PATHINFO_SKIPPED) {
830                                 condlog(3, "%s: remove blacklisted path",
831                                         uev->kernel);
832                                 i = find_slot(vecs->pathvec, (void *)pp);
833                                 if (i != -1)
834                                         vector_del_slot(vecs->pathvec, i);
835                                 free_path(pp);
836                         } else {
837                                 condlog(0, "%s: failed to reinitialize path",
838                                         uev->kernel);
839                                 ret = 1;
840                         }
841                 }
842         }
843         lock_cleanup_pop(vecs->lock);
844         if (pp)
845                 return ret;
846
847         /*
848          * get path vital state
849          */
850         conf = get_multipath_config();
851         ret = alloc_path_with_pathinfo(conf, uev->udev,
852                                        uev->wwid, DI_ALL, &pp);
853         put_multipath_config(conf);
854         if (!pp) {
855                 if (ret == PATHINFO_SKIPPED)
856                         return 0;
857                 condlog(3, "%s: failed to get path info", uev->kernel);
858                 return 1;
859         }
860         pthread_cleanup_push(cleanup_lock, &vecs->lock);
861         lock(&vecs->lock);
862         pthread_testcancel();
863         ret = store_path(vecs->pathvec, pp);
864         if (!ret) {
865                 conf = get_multipath_config();
866                 pp->checkint = conf->checkint;
867                 put_multipath_config(conf);
868                 ret = ev_add_path(pp, vecs, need_do_map);
869         } else {
870                 condlog(0, "%s: failed to store path info, "
871                         "dropping event",
872                         uev->kernel);
873                 free_path(pp);
874                 ret = 1;
875         }
876         lock_cleanup_pop(vecs->lock);
877         return ret;
878 }
879
880 /*
881  * returns:
882  * 0: added
883  * 1: error
884  */
885 int
886 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
887 {
888         struct multipath * mpp;
889         char params[PARAMS_SIZE] = {0};
890         int retries = 3;
891         int start_waiter = 0;
892         int ret;
893
894         /*
895          * need path UID to go any further
896          */
897         if (strlen(pp->wwid) == 0) {
898                 condlog(0, "%s: failed to get path uid", pp->dev);
899                 goto fail; /* leave path added to pathvec */
900         }
901         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
902         if (mpp && mpp->wait_for_udev &&
903             (pathcount(mpp, PATH_UP) > 0 ||
904              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
905               mpp->ghost_delay_tick <= 0))) {
906                 /* if wait_for_udev is set and valid paths exist */
907                 condlog(2, "%s: delaying path addition until %s is fully initialized", pp->dev, mpp->alias);
908                 mpp->wait_for_udev = 2;
909                 orphan_path(pp, "waiting for create to complete");
910                 return 0;
911         }
912
913         pp->mpp = mpp;
914 rescan:
915         if (mpp) {
916                 if (pp->size && mpp->size != pp->size) {
917                         condlog(0, "%s: failed to add new path %s, "
918                                 "device size mismatch",
919                                 mpp->alias, pp->dev);
920                         int i = find_slot(vecs->pathvec, (void *)pp);
921                         if (i != -1)
922                                 vector_del_slot(vecs->pathvec, i);
923                         free_path(pp);
924                         return 1;
925                 }
926
927                 condlog(4,"%s: adopting all paths for path %s",
928                         mpp->alias, pp->dev);
929                 if (adopt_paths(vecs->pathvec, mpp))
930                         goto fail; /* leave path added to pathvec */
931
932                 verify_paths(mpp, vecs);
933                 mpp->action = ACT_RELOAD;
934                 extract_hwe_from_path(mpp);
935         } else {
936                 if (!should_multipath(pp, vecs->pathvec)) {
937                         orphan_path(pp, "only one path");
938                         return 0;
939                 }
940                 condlog(4,"%s: creating new map", pp->dev);
941                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
942                         mpp->action = ACT_CREATE;
943                         /*
944                          * We don't depend on ACT_CREATE, as domap will
945                          * set it to ACT_NOTHING when complete.
946                          */
947                         start_waiter = 1;
948                 }
949                 if (!start_waiter)
950                         goto fail; /* leave path added to pathvec */
951         }
952
953         /* persistent reservation check */
954         mpath_pr_event_handle(pp);
955
956         if (!need_do_map)
957                 return 0;
958
959         if (!dm_map_present(mpp->alias)) {
960                 mpp->action = ACT_CREATE;
961                 start_waiter = 1;
962         }
963         /*
964          * push the map to the device-mapper
965          */
966         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
967                 condlog(0, "%s: failed to setup map for addition of new "
968                         "path %s", mpp->alias, pp->dev);
969                 goto fail_map;
970         }
971         /*
972          * reload the map for the multipath mapped device
973          */
974 retry:
975         ret = domap(mpp, params, 1);
976         if (ret <= 0) {
977                 if (ret < 0 && retries-- > 0) {
978                         condlog(0, "%s: retry domap for addition of new "
979                                 "path %s", mpp->alias, pp->dev);
980                         sleep(1);
981                         goto retry;
982                 }
983                 condlog(0, "%s: failed in domap for addition of new "
984                         "path %s", mpp->alias, pp->dev);
985                 /*
986                  * deal with asynchronous uevents :((
987                  */
988                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
989                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
990                         sleep(1);
991                         update_mpp_paths(mpp, vecs->pathvec);
992                         goto rescan;
993                 }
994                 else if (mpp->action == ACT_RELOAD)
995                         condlog(0, "%s: giving up reload", mpp->alias);
996                 else
997                         goto fail_map;
998         }
999         dm_lib_release();
1000
1001         if ((mpp->action == ACT_CREATE ||
1002              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1003             wait_for_events(mpp, vecs))
1004                         goto fail_map;
1005
1006         /*
1007          * update our state from kernel regardless of create or reload
1008          */
1009         if (setup_multipath(vecs, mpp))
1010                 goto fail; /* if setup_multipath fails, it removes the map */
1011
1012         sync_map_state(mpp);
1013
1014         if (retries >= 0) {
1015                 condlog(2, "%s [%s]: path added to devmap %s",
1016                         pp->dev, pp->dev_t, mpp->alias);
1017                 return 0;
1018         } else
1019                 goto fail;
1020
1021 fail_map:
1022         remove_map(mpp, vecs, 1);
1023 fail:
1024         orphan_path(pp, "failed to add path");
1025         return 1;
1026 }
1027
1028 static int
1029 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1030 {
1031         struct path *pp;
1032         int ret;
1033
1034         condlog(2, "%s: remove path (uevent)", uev->kernel);
1035         delete_foreign(uev->udev);
1036
1037         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1038         lock(&vecs->lock);
1039         pthread_testcancel();
1040         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1041         if (pp)
1042                 ret = ev_remove_path(pp, vecs, need_do_map);
1043         lock_cleanup_pop(vecs->lock);
1044         if (!pp) {
1045                 /* Not an error; path might have been purged earlier */
1046                 condlog(0, "%s: path already removed", uev->kernel);
1047                 return 0;
1048         }
1049         return ret;
1050 }
1051
1052 int
1053 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1054 {
1055         struct multipath * mpp;
1056         int i, retval = 0;
1057         char params[PARAMS_SIZE] = {0};
1058
1059         /*
1060          * avoid referring to the map of an orphaned path
1061          */
1062         if ((mpp = pp->mpp)) {
1063                 /*
1064                  * transform the mp->pg vector of vectors of paths
1065                  * into a mp->params string to feed the device-mapper
1066                  */
1067                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1068                         condlog(0, "%s: failed to update paths",
1069                                 mpp->alias);
1070                         goto fail;
1071                 }
1072                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1073                         vector_del_slot(mpp->paths, i);
1074
1075                 /*
1076                  * remove the map IF removing the last path
1077                  */
1078                 if (VECTOR_SIZE(mpp->paths) == 0) {
1079                         char alias[WWID_SIZE];
1080
1081                         /*
1082                          * flush_map will fail if the device is open
1083                          */
1084                         strncpy(alias, mpp->alias, WWID_SIZE);
1085                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1086                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1087                                 mpp->retry_tick = 0;
1088                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1089                                 mpp->disable_queueing = 1;
1090                                 mpp->stat_map_failures++;
1091                                 dm_queue_if_no_path(mpp->alias, 0);
1092                         }
1093                         if (!flush_map(mpp, vecs, 1)) {
1094                                 condlog(2, "%s: removed map after"
1095                                         " removing all paths",
1096                                         alias);
1097                                 retval = 0;
1098                                 goto out;
1099                         }
1100                         /*
1101                          * Not an error, continue
1102                          */
1103                 }
1104
1105                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1106                         condlog(0, "%s: failed to setup map for"
1107                                 " removal of path %s", mpp->alias, pp->dev);
1108                         goto fail;
1109                 }
1110
1111                 if (mpp->wait_for_udev) {
1112                         mpp->wait_for_udev = 2;
1113                         goto out;
1114                 }
1115
1116                 if (!need_do_map)
1117                         goto out;
1118                 /*
1119                  * reload the map
1120                  */
1121                 mpp->action = ACT_RELOAD;
1122                 if (domap(mpp, params, 1) <= 0) {
1123                         condlog(0, "%s: failed in domap for "
1124                                 "removal of path %s",
1125                                 mpp->alias, pp->dev);
1126                         retval = 1;
1127                 } else {
1128                         /*
1129                          * update our state from kernel
1130                          */
1131                         if (setup_multipath(vecs, mpp))
1132                                 return 1;
1133                         sync_map_state(mpp);
1134
1135                         condlog(2, "%s [%s]: path removed from map %s",
1136                                 pp->dev, pp->dev_t, mpp->alias);
1137                 }
1138         }
1139
1140 out:
1141         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1142                 vector_del_slot(vecs->pathvec, i);
1143
1144         free_path(pp);
1145
1146         return retval;
1147
1148 fail:
1149         remove_map_and_stop_waiter(mpp, vecs, 1);
1150         return 1;
1151 }
1152
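/*
 * Handle a 'change' uevent for a path: detect WWID changes (failing the
 * path when disable_changed_wwids is set), refresh the udev device and
 * sysfs information, and reload the map if the read-only state of the
 * device changed.  Paths still waiting for udev initialization are
 * re-added through uev_add_path().
 */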
1153 static int
1154 uev_update_path (struct uevent *uev, struct vectors * vecs)
1155 {
1156         int ro, retval = 0, rc;
1157         struct path * pp;
1158         struct config *conf;
1159         int disable_changed_wwids;
1160         int needs_reinit = 0;
1161
1162         switch ((rc = change_foreign(uev->udev))) {
1163         case FOREIGN_OK:
1164                 /* known foreign path, ignore event */
1165                 return 0;
1166         case FOREIGN_IGNORED:
1167                 break;
1168         case FOREIGN_ERR:
1169                 condlog(3, "%s: error in change_foreign", __func__);
1170                 break;
1171         default:
1172                 condlog(1, "%s: return code %d of change_foreign is unsupported",
1173                         __func__, rc);
1174                 break;
1175         }
1176
1177         conf = get_multipath_config();
1178         disable_changed_wwids = conf->disable_changed_wwids;
1179         put_multipath_config(conf);
1180
1181         ro = uevent_get_disk_ro(uev);
1182
1183         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1184         lock(&vecs->lock);
1185         pthread_testcancel();
1186
1187         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1188         if (pp) {
1189                 struct multipath *mpp = pp->mpp;
1190                 char wwid[WWID_SIZE];
1191
1192                 strcpy(wwid, pp->wwid);
1193                 get_uid(pp, pp->state, uev->udev);
1194
1195                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1196                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1197                                 uev->kernel, wwid, pp->wwid,
1198                                 (disable_changed_wwids ? "disallowing" :
1199                                  "continuing"));
1200                         if (disable_changed_wwids &&
1201                             (strlen(wwid) || pp->wwid_changed)) {
1202                                 strcpy(pp->wwid, wwid);
1203                                 if (!pp->wwid_changed) {
1204                                         pp->wwid_changed = 1;
1205                                         pp->tick = 1;
1206                                         if (pp->mpp)
1207                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1208                                 }
1209                                 goto out;
1210                         } else if (!disable_changed_wwids)
1211                                 strcpy(pp->wwid, wwid);
1212                         else
1213                                 pp->wwid_changed = 0;
1214                 } else {
1215                         udev_device_unref(pp->udev);
1216                         pp->udev = udev_device_ref(uev->udev);
1217                         conf = get_multipath_config();
1218                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1219                                 condlog(1, "%s: pathinfo failed after change uevent",
1220                                         uev->kernel);
1221                         put_multipath_config(conf);
1222                 }
1223
1224                 if (pp->initialized == INIT_REQUESTED_UDEV)
1225                         needs_reinit = 1;
1226                 else if (mpp && ro >= 0) {
1227                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1228
1229                         if (mpp->wait_for_udev)
1230                                 mpp->wait_for_udev = 2;
1231                         else {
1232                                 if (ro == 1)
1233                                         pp->mpp->force_readonly = 1;
1234                                 retval = reload_map(vecs, mpp, 0, 1);
1235                                 pp->mpp->force_readonly = 0;
1236                                 condlog(2, "%s: map %s reloaded (retval %d)",
1237                                         uev->kernel, mpp->alias, retval);
1238                         }
1239                 }
1240         }
1241 out:
1242         lock_cleanup_pop(vecs->lock);
1243         if (!pp) {
1244                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1245                 if (uev->udev) {
1246                         int flag = DI_SYSFS | DI_WWID;
1247
1248                         conf = get_multipath_config();
1249                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1250                         put_multipath_config(conf);
1251
1252                         if (retval == PATHINFO_SKIPPED) {
1253                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1254                                 return 0;
1255                         }
1256                 }
1257
1258                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1259         }
1260         if (needs_reinit)
1261                 retval = uev_add_path(uev, vecs, 1);
1262         return retval;
1263 }
1264
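/*
 * Check a dm uevent for a PATH_FAILED action and, if it names a path we
 * know about, hand it to the io_err_stat code for path IO error
 * accounting.
 */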
1265 static int
1266 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1267 {
1268         char *action = NULL, *devt = NULL;
1269         struct path *pp;
1270         int r = 1;
1271
1272         action = uevent_get_dm_action(uev);
1273         if (!action)
1274                 return 1;
1275         if (strncmp(action, "PATH_FAILED", 11))
1276                 goto out;
1277         devt = uevent_get_dm_path(uev);
1278         if (!devt) {
1279                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1280                 goto out;
1281         }
1282
1283         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1284         lock(&vecs->lock);
1285         pthread_testcancel();
1286         pp = find_path_by_devt(vecs->pathvec, devt);
1287         if (!pp)
1288                 goto out_lock;
1289         r = io_err_stat_handle_pathfail(pp);
1290         if (r)
1291                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1292                                 pp->dev);
1293 out_lock:
1294         lock_cleanup_pop(vecs->lock);
1295         FREE(devt);
1296         FREE(action);
1297         return r;
1298 out:
1299         FREE(action);
1300         return 1;
1301 }
1302
1303 static int
1304 map_discovery (struct vectors * vecs)
1305 {
1306         struct multipath * mpp;
1307         unsigned int i;
1308
1309         if (dm_get_maps(vecs->mpvec))
1310                 return 1;
1311
1312         vector_foreach_slot (vecs->mpvec, mpp, i)
1313                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1314                     update_multipath_status(mpp)) {
1315                         remove_map(mpp, vecs, 1);
1316                         i--;
1317                 }
1318
1319         return 0;
1320 }
1321
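/*
 * Callback run by the unix socket listener for each CLI request.
 * Non-root clients are only allowed to run "list"/"show" commands; the
 * reply buffer and its length are filled in for the caller.
 */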
1322 int
1323 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1324                 void * trigger_data)
1325 {
1326         struct vectors * vecs;
1327         int r;
1328
1329         *reply = NULL;
1330         *len = 0;
1331         vecs = (struct vectors *)trigger_data;
1332
1333         if ((str != NULL) && (is_root == false) &&
1334             (strncmp(str, "list", strlen("list")) != 0) &&
1335             (strncmp(str, "show", strlen("show")) != 0)) {
1336                 *reply = STRDUP("permission denied: need to be root");
1337                 if (*reply)
1338                         *len = strlen(*reply) + 1;
1339                 return 1;
1340         }
1341
1342         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1343
1344         if (r > 0) {
1345                 if (r == ETIMEDOUT)
1346                         *reply = STRDUP("timeout\n");
1347                 else
1348                         *reply = STRDUP("fail\n");
1349                 if (*reply)
1350                         *len = strlen(*reply) + 1;
1351                 r = 1;
1352         }
1353         else if (!r && *len == 0) {
1354                 *reply = STRDUP("ok\n");
1355                 if (*reply)
1356                         *len = strlen(*reply) + 1;
1357                 r = 0;
1358         }
1359         /* else if (r < 0) leave *reply alone */
1360
1361         return r;
1362 }
1363
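/*
 * Main uevent dispatcher.  Waits for the daemon to become idle or
 * running (returning early on shutdown), then routes dm-* map events to
 * uev_add_map()/uev_remove_map() and path add/remove/change events,
 * including merged ones, to the corresponding path handlers.
 */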
1364 int
1365 uev_trigger (struct uevent * uev, void * trigger_data)
1366 {
1367         int r = 0;
1368         struct vectors * vecs;
1369         struct uevent *merge_uev, *tmp;
1370
1371         vecs = (struct vectors *)trigger_data;
1372
1373         pthread_cleanup_push(config_cleanup, NULL);
1374         pthread_mutex_lock(&config_lock);
1375         if (running_state != DAEMON_IDLE &&
1376             running_state != DAEMON_RUNNING)
1377                 pthread_cond_wait(&config_cond, &config_lock);
1378         pthread_cleanup_pop(1);
1379
1380         if (running_state == DAEMON_SHUTDOWN)
1381                 return 0;
1382
1383         /*
1384          * device map event
1385          * Add events are ignored here as the tables
1386          * are not fully initialised then.
1387          */
1388         if (!strncmp(uev->kernel, "dm-", 3)) {
1389                 if (!uevent_is_mpath(uev)) {
1390                         if (!strncmp(uev->action, "change", 6))
1391                                 (void)add_foreign(uev->udev);
1392                         else if (!strncmp(uev->action, "remove", 6))
1393                                 (void)delete_foreign(uev->udev);
1394                         goto out;
1395                 }
1396                 if (!strncmp(uev->action, "change", 6)) {
1397                         r = uev_add_map(uev, vecs);
1398
1399                         /*
1400                          * The kernel-side dm-mpath issues a PATH_FAILED
1401                          * event when it encounters a path IO error, which
1402                          * makes it a reasonable entry point for the path
1403                          * IO error accounting process.
1404                          */
1405                         uev_pathfail_check(uev, vecs);
1406                 } else if (!strncmp(uev->action, "remove", 6)) {
1407                         r = uev_remove_map(uev, vecs);
1408                 }
1409                 goto out;
1410         }
1411
1412         /*
1413          * path add/remove/change event, add/remove maybe merged
1414          */
1415         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1416                 if (!strncmp(merge_uev->action, "add", 3))
1417                         r += uev_add_path(merge_uev, vecs, 0);
1418                 if (!strncmp(merge_uev->action, "remove", 6))
1419                         r += uev_remove_path(merge_uev, vecs, 0);
1420         }
1421
1422         if (!strncmp(uev->action, "add", 3))
1423                 r += uev_add_path(uev, vecs, 1);
1424         if (!strncmp(uev->action, "remove", 6))
1425                 r += uev_remove_path(uev, vecs, 1);
1426         if (!strncmp(uev->action, "change", 6))
1427                 r += uev_update_path(uev, vecs);
1428
1429 out:
1430         return r;
1431 }
1432
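/*
 * Threads that call get_multipath_config() or other RCU read-side
 * primitives must be registered with liburcu.  Each daemon thread below
 * follows the same pattern:
 *
 *     pthread_cleanup_push(rcu_unregister, NULL);
 *     rcu_register_thread();
 *     ... thread body ...
 *     pthread_cleanup_pop(1);
 *
 * so the thread is unregistered again when it exits or is cancelled.
 */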
1433 static void rcu_unregister(void *param)
1434 {
1435         rcu_unregister_thread();
1436 }
1437
1438 static void *
1439 ueventloop (void * ap)
1440 {
1441         struct udev *udev = ap;
1442
1443         pthread_cleanup_push(rcu_unregister, NULL);
1444         rcu_register_thread();
1445         if (uevent_listen(udev))
1446                 condlog(0, "error starting uevent listener");
1447         pthread_cleanup_pop(1);
1448         return NULL;
1449 }
1450
1451 static void *
1452 uevqloop (void * ap)
1453 {
1454         pthread_cleanup_push(rcu_unregister, NULL);
1455         rcu_register_thread();
1456         if (uevent_dispatch(&uev_trigger, ap))
1457                 condlog(0, "error starting uevent dispatcher");
1458         pthread_cleanup_pop(1);
1459         return NULL;
1460 }
1461 static void *
1462 uxlsnrloop (void * ap)
1463 {
1464         if (cli_init()) {
1465                 condlog(1, "Failed to init uxsock listener");
1466                 return NULL;
1467         }
1468         pthread_cleanup_push(rcu_unregister, NULL);
1469         rcu_register_thread();
1470         set_handler_callback(LIST+PATHS, cli_list_paths);
1471         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1472         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1473         set_handler_callback(LIST+PATH, cli_list_path);
1474         set_handler_callback(LIST+MAPS, cli_list_maps);
1475         set_handler_callback(LIST+STATUS, cli_list_status);
1476         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1477         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1478         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1479         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1480         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1481         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1482         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1483         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1484         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1485         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1486         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1487         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1488         set_handler_callback(LIST+CONFIG, cli_list_config);
1489         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1490         set_handler_callback(LIST+DEVICES, cli_list_devices);
1491         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1492         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1493         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1494         set_handler_callback(ADD+PATH, cli_add_path);
1495         set_handler_callback(DEL+PATH, cli_del_path);
1496         set_handler_callback(ADD+MAP, cli_add_map);
1497         set_handler_callback(DEL+MAP, cli_del_map);
1498         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1499         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1500         set_handler_callback(SUSPEND+MAP, cli_suspend);
1501         set_handler_callback(RESUME+MAP, cli_resume);
1502         set_handler_callback(RESIZE+MAP, cli_resize);
1503         set_handler_callback(RELOAD+MAP, cli_reload);
1504         set_handler_callback(RESET+MAP, cli_reassign);
1505         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1506         set_handler_callback(FAIL+PATH, cli_fail);
1507         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1508         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1509         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1510         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1511         set_unlocked_handler_callback(QUIT, cli_quit);
1512         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1513         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1514         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1515         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1516         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1517         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1518         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1519         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1520         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1521
1522         umask(077);
1523         uxsock_listen(&uxsock_trigger, ap);
1524         pthread_cleanup_pop(1);
1525         return NULL;
1526 }
1527
1528 void
1529 exit_daemon (void)
1530 {
1531         post_config_state(DAEMON_SHUTDOWN);
1532 }
1533
1534 static void
1535 fail_path (struct path * pp, int del_active)
1536 {
1537         if (!pp->mpp)
1538                 return;
1539
1540         condlog(2, "checker failed path %s in map %s",
1541                  pp->dev_t, pp->mpp->alias);
1542
1543         dm_fail_path(pp->mpp->alias, pp->dev_t);
1544         if (del_active)
1545                 update_queue_mode_del_path(pp->mpp);
1546 }
1547
1548 /*
1549  * caller must have locked the path list before calling that function
1550  */
1551 static int
1552 reinstate_path (struct path * pp, int add_active)
1553 {
1554         int ret = 0;
1555
1556         if (!pp->mpp)
1557                 return 0;
1558
1559         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1560                 condlog(0, "%s: reinstate failed", pp->dev_t);
1561                 ret = 1;
1562         } else {
1563                 condlog(2, "%s: reinstated", pp->dev_t);
1564                 if (add_active)
1565                         update_queue_mode_add_path(pp->mpp);
1566         }
1567         return ret;
1568 }
1569
1570 static void
1571 enable_group(struct path * pp)
1572 {
1573         struct pathgroup * pgp;
1574
1575         /*
1576          * If the path was added through uev_add_path, pgindex may still be
1577          * unset; the next update_multipath_strings() call, triggered by the
1578          * map reload event, will set it.
1579          *
1580          * We can safely return here: upon map reload, all path groups will be enabled.
1581          */
1582         if (!pp->mpp->pg || !pp->pgindex)
1583                 return;
1584
1585         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1586
1587         if (pgp->status == PGSTATE_DISABLED) {
1588                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1589                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1590         }
1591 }
1592
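/*
 * Drop multipath entries whose device-mapper map no longer exists.
 */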
1593 static void
1594 mpvec_garbage_collector (struct vectors * vecs)
1595 {
1596         struct multipath * mpp;
1597         unsigned int i;
1598
1599         if (!vecs->mpvec)
1600                 return;
1601
1602         vector_foreach_slot (vecs->mpvec, mpp, i) {
1603                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1604                         condlog(2, "%s: remove dead map", mpp->alias);
1605                         remove_map_and_stop_waiter(mpp, vecs, 1);
1606                         i--;
1607                 }
1608         }
1609 }
1610
1611 /* This is called after a path has started working again. If the multipath
1612  * device for this path uses the followover failback type, and this is the
1613  * best pathgroup, and this is the first path in the pathgroup to come back
1614  * up, then switch to this pathgroup */
1615 static int
1616 followover_should_failback(struct path * pp)
1617 {
1618         struct pathgroup * pgp;
1619         struct path *pp1;
1620         int i;
1621
1622         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1623             !pp->mpp->pg || !pp->pgindex ||
1624             pp->pgindex != pp->mpp->bestpg)
1625                 return 0;
1626
1627         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1628         vector_foreach_slot(pgp->paths, pp1, i) {
1629                 if (pp1 == pp)
1630                         continue;
1631                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1632                         return 0;
1633         }
1634         return 1;
1635 }
1636
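/*
 * Count down the uevent wait timers of maps still waiting for their
 * creation uevent. On timeout, re-enable reloads for the map (updating
 * it right away if an update was held back) and, if a delayed
 * reconfigure is pending, trigger it now.
 */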
1637 static void
1638 missing_uev_wait_tick(struct vectors *vecs)
1639 {
1640         struct multipath * mpp;
1641         unsigned int i;
1642         int timed_out = 0, delayed_reconfig;
1643         struct config *conf;
1644
1645         vector_foreach_slot (vecs->mpvec, mpp, i) {
1646                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1647                         timed_out = 1;
1648                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1649                         if (mpp->wait_for_udev > 1 &&
1650                             update_map(mpp, vecs, 0)) {
1651                                 /* update_map removed map */
1652                                 i--;
1653                                 continue;
1654                         }
1655                         mpp->wait_for_udev = 0;
1656                 }
1657         }
1658
1659         conf = get_multipath_config();
1660         delayed_reconfig = conf->delayed_reconfig;
1661         put_multipath_config(conf);
1662         if (timed_out && delayed_reconfig &&
1663             !need_to_delay_reconfig(vecs)) {
1664                 condlog(2, "reconfigure (delayed)");
1665                 set_config_state(DAEMON_CONFIGURE);
1666         }
1667 }
1668
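/*
 * Count down the per-map ghost delay timers. When a map times out
 * waiting for an active path, force a udev reload and update the map.
 */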
1669 static void
1670 ghost_delay_tick(struct vectors *vecs)
1671 {
1672         struct multipath * mpp;
1673         unsigned int i;
1674
1675         vector_foreach_slot (vecs->mpvec, mpp, i) {
1676                 if (mpp->ghost_delay_tick <= 0)
1677                         continue;
1678                 if (--mpp->ghost_delay_tick <= 0) {
1679                         condlog(0, "%s: timed out waiting for active path",
1680                                 mpp->alias);
1681                         mpp->force_udev_reload = 1;
1682                         if (update_map(mpp, vecs, 0) != 0) {
1683                                 /* update_map removed map */
1684                                 i--;
1685                                 continue;
1686                         }
1687                 }
1688         }
1689 }
1690
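/*
 * Count down the deferred failback timers; when a timer expires and a
 * better pathgroup is available, switch to it.
 */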
1691 static void
1692 defered_failback_tick (vector mpvec)
1693 {
1694         struct multipath * mpp;
1695         unsigned int i;
1696
1697         vector_foreach_slot (mpvec, mpp, i) {
1698                 /*
1699                  * the deferred failback gets one tick closer
1700                  */
1701                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1702                         mpp->failback_tick--;
1703
1704                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1705                                 switch_pathgroup(mpp);
1706                 }
1707         }
1708 }
1709
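/*
 * Count down the retry timers of maps queueing without an active path;
 * when a timer expires, disable queue_if_no_path for that map.
 */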
1710 static void
1711 retry_count_tick(vector mpvec)
1712 {
1713         struct multipath *mpp;
1714         unsigned int i;
1715
1716         vector_foreach_slot (mpvec, mpp, i) {
1717                 if (mpp->retry_tick > 0) {
1718                         mpp->stat_total_queueing_time++;
1719                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1720                         if (--mpp->retry_tick == 0) {
1721                                 mpp->stat_map_failures++;
1722                                 dm_queue_if_no_path(mpp->alias, 0);
1723                                 condlog(2, "%s: Disable queueing", mpp->alias);
1724                         }
1725                 }
1726         }
1727 }
1728
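/*
 * Refresh path priorities. With refresh_all set, re-read the priority of
 * every path in the map; otherwise only this path (unless it is down).
 * Returns 1 if any priority changed, 0 otherwise.
 */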
1729 int update_prio(struct path *pp, int refresh_all)
1730 {
1731         int oldpriority;
1732         struct path *pp1;
1733         struct pathgroup * pgp;
1734         int i, j, changed = 0;
1735         struct config *conf;
1736
1737         if (refresh_all) {
1738                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1739                         vector_foreach_slot (pgp->paths, pp1, j) {
1740                                 oldpriority = pp1->priority;
1741                                 conf = get_multipath_config();
1742                                 pathinfo(pp1, conf, DI_PRIO);
1743                                 put_multipath_config(conf);
1744                                 if (pp1->priority != oldpriority)
1745                                         changed = 1;
1746                         }
1747                 }
1748                 return changed;
1749         }
1750         oldpriority = pp->priority;
1751         conf = get_multipath_config();
1752         if (pp->state != PATH_DOWN)
1753                 pathinfo(pp, conf, DI_PRIO);
1754         put_multipath_config(conf);
1755
1756         if (pp->priority == oldpriority)
1757                 return 0;
1758         return 1;
1759 }
1760
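/*
 * Reload the map so that its path groups reflect the current path
 * priorities, then resynchronize the daemon's view with the kernel state.
 */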
1761 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1762 {
1763         if (reload_map(vecs, mpp, refresh, 1))
1764                 return 1;
1765
1766         dm_lib_release();
1767         if (setup_multipath(vecs, mpp) != 0)
1768                 return 1;
1769         sync_map_state(mpp);
1770
1771         return 0;
1772 }
1773
1774 void repair_path(struct path * pp)
1775 {
1776         if (pp->state != PATH_DOWN)
1777                 return;
1778
1779         checker_repair(&pp->checker);
1780         LOG_MSG(1, checker_message(&pp->checker));
1781 }
1782
1783 /*
1784  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1785  * and '0' otherwise
1786  */
1787 int
1788 check_path (struct vectors * vecs, struct path * pp, int ticks)
1789 {
1790         int newstate;
1791         int new_path_up = 0;
1792         int chkr_new_path_up = 0;
1793         int add_active;
1794         int disable_reinstate = 0;
1795         int oldchkrstate = pp->chkrstate;
1796         int retrigger_tries, checkint;
1797         struct config *conf;
1798         int ret;
1799
1800         if ((pp->initialized == INIT_OK ||
1801              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1802                 return 0;
1803
1804         if (pp->tick)
1805                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1806         if (pp->tick)
1807                 return 0; /* don't check this path yet */
1808
1809         conf = get_multipath_config();
1810         retrigger_tries = conf->retrigger_tries;
1811         checkint = conf->checkint;
1812         put_multipath_config(conf);
1813         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1814             pp->retriggers < retrigger_tries) {
1815                 condlog(2, "%s: triggering change event to reinitialize",
1816                         pp->dev);
1817                 pp->initialized = INIT_REQUESTED_UDEV;
1818                 pp->retriggers++;
1819                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1820                                      strlen("change"));
1821                 return 0;
1822         }
1823
1824         /*
1825          * schedule the next check as soon as possible,
1826          * in case we exit abnormally from here
1827          */
1828         pp->tick = checkint;
1829
1830         newstate = path_offline(pp);
1831         /*
1832          * Wait for uevent for removed paths;
1833          * some LLDDs like zfcp keep paths unavailable
1834          * without sending uevents.
1835          */
1836         if (newstate == PATH_REMOVED)
1837                 newstate = PATH_DOWN;
1838
1839         if (newstate == PATH_UP) {
1840                 conf = get_multipath_config();
1841                 newstate = get_state(pp, conf, 1, newstate);
1842                 put_multipath_config(conf);
1843         } else
1844                 checker_clear_message(&pp->checker);
1845
1846         if (pp->wwid_changed) {
1847                 condlog(2, "%s: path wwid has changed. Refusing to use",
1848                         pp->dev);
1849                 newstate = PATH_DOWN;
1850         }
1851
1852         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1853                 condlog(2, "%s: unusable path", pp->dev);
1854                 conf = get_multipath_config();
1855                 pathinfo(pp, conf, 0);
1856                 put_multipath_config(conf);
1857                 return 1;
1858         }
1859         if (!pp->mpp) {
1860                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1861                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1862                         condlog(2, "%s: add missing path", pp->dev);
1863                         conf = get_multipath_config();
1864                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1865                         if (ret == PATHINFO_OK) {
1866                                 ev_add_path(pp, vecs, 1);
1867                                 pp->tick = 1;
1868                         } else if (ret == PATHINFO_SKIPPED) {
1869                                 put_multipath_config(conf);
1870                                 return -1;
1871                         }
1872                         put_multipath_config(conf);
1873                 }
1874                 return 0;
1875         }
1876         /*
1877          * Async IO in flight. Keep the previous path state
1878          * and reschedule as soon as possible
1879          */
1880         if (newstate == PATH_PENDING) {
1881                 pp->tick = 1;
1882                 return 0;
1883         }
1884         /*
1885          * Synchronize with kernel state
1886          */
1887         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1888                 condlog(1, "%s: Could not synchronize with kernel state",
1889                         pp->dev);
1890                 pp->dmstate = PSTATE_UNDEF;
1891         }
1892         /* if update_multipath_strings orphaned the path, quit early */
1893         if (!pp->mpp)
1894                 return 0;
1895
1896         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1897                 pp->state = PATH_SHAKY;
1898                 /*
1899                  * reschedule as soon as possible, so that this path can
1900                  * be recovered in time
1901                  */
1902                 pp->tick = 1;
1903                 return 1;
1904         }
1905
1906         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1907              pp->wait_checks > 0) {
1908                 if (pp->mpp->nr_active > 0) {
1909                         pp->state = PATH_DELAYED;
1910                         pp->wait_checks--;
1911                         return 1;
1912                 } else
1913                         pp->wait_checks = 0;
1914         }
1915
1916         /*
1917          * don't reinstate a failed path if it's in stand-by
1918          * and if target supports only implicit tpgs mode.
1919          * this will prevent unnecessary i/o by dm on stand-by
1920          * paths if there are no other active paths in map.
1921          */
1922         disable_reinstate = (newstate == PATH_GHOST &&
1923                             pp->mpp->nr_active == 0 &&
1924                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1925
1926         pp->chkrstate = newstate;
1927         if (newstate != pp->state) {
1928                 int oldstate = pp->state;
1929                 pp->state = newstate;
1930
1931                 LOG_MSG(1, checker_message(&pp->checker));
1932
1933                 /*
1934                  * upon state change, reset the checkint
1935                  * to the shortest delay
1936                  */
1937                 conf = get_multipath_config();
1938                 pp->checkint = conf->checkint;
1939                 put_multipath_config(conf);
1940
1941                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1942                         /*
1943                          * proactively fail path in the DM
1944                          */
1945                         if (oldstate == PATH_UP ||
1946                             oldstate == PATH_GHOST) {
1947                                 fail_path(pp, 1);
1948                                 if (pp->mpp->delay_wait_checks > 0 &&
1949                                     pp->watch_checks > 0) {
1950                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1951                                         pp->watch_checks = 0;
1952                                 }
1953                         } else
1954                                 fail_path(pp, 0);
1955
1956                         /*
1957                          * cancel scheduled failback
1958                          */
1959                         pp->mpp->failback_tick = 0;
1960
1961                         pp->mpp->stat_path_failures++;
1962                         repair_path(pp);
1963                         return 1;
1964                 }
1965
1966                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1967                         if (pp->mpp && pp->mpp->prflag) {
1968                                 /*
1969                                  * Check Persistent Reservation.
1970                                  */
1971                                 condlog(2, "%s: checking persistent "
1972                                         "reservation registration", pp->dev);
1973                                 mpath_pr_event_handle(pp);
1974                         }
1975                 }
1976
1977                 /*
1978                  * reinstate this path
1979                  */
1980                 if (oldstate != PATH_UP &&
1981                     oldstate != PATH_GHOST) {
1982                         if (pp->mpp->delay_watch_checks > 0)
1983                                 pp->watch_checks = pp->mpp->delay_watch_checks;
1984                         add_active = 1;
1985                 } else {
1986                         if (pp->watch_checks > 0)
1987                                 pp->watch_checks--;
1988                         add_active = 0;
1989                 }
1990                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1991                         condlog(3, "%s: reload map", pp->dev);
1992                         ev_add_path(pp, vecs, 1);
1993                         pp->tick = 1;
1994                         return 0;
1995                 }
1996                 new_path_up = 1;
1997
1998                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1999                         chkr_new_path_up = 1;
2000
2001                 /*
2002                  * if at least one path is up in a group, and
2003                  * the group is disabled, re-enable it
2004                  */
2005                 if (newstate == PATH_UP)
2006                         enable_group(pp);
2007         }
2008         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2009                 if ((pp->dmstate == PSTATE_FAILED ||
2010                     pp->dmstate == PSTATE_UNDEF) &&
2011                     !disable_reinstate) {
2012                         /* Clear IO errors */
2013                         if (reinstate_path(pp, 0)) {
2014                                 condlog(3, "%s: reload map", pp->dev);
2015                                 ev_add_path(pp, vecs, 1);
2016                                 pp->tick = 1;
2017                                 return 0;
2018                         }
2019                 } else {
2020                         unsigned int max_checkint;
2021                         LOG_MSG(4, checker_message(&pp->checker));
2022                         conf = get_multipath_config();
2023                         max_checkint = conf->max_checkint;
2024                         put_multipath_config(conf);
2025                         if (pp->checkint != max_checkint) {
2026                                 /*
2027                                  * double the next check delay.
2028                                  * max at conf->max_checkint
2029                                  */
2030                                 if (pp->checkint < (max_checkint / 2))
2031                                         pp->checkint = 2 * pp->checkint;
2032                                 else
2033                                         pp->checkint = max_checkint;
2034
2035                                 condlog(4, "%s: delay next check %is",
2036                                         pp->dev_t, pp->checkint);
2037                         }
2038                         if (pp->watch_checks > 0)
2039                                 pp->watch_checks--;
2040                         pp->tick = pp->checkint;
2041                 }
2042         }
2043         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2044                 if (pp->dmstate == PSTATE_ACTIVE ||
2045                     pp->dmstate == PSTATE_UNDEF)
2046                         fail_path(pp, 0);
2047                 if (newstate == PATH_DOWN) {
2048                         int log_checker_err;
2049
2050                         conf = get_multipath_config();
2051                         log_checker_err = conf->log_checker_err;
2052                         put_multipath_config(conf);
2053                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2054                                 LOG_MSG(3, checker_message(&pp->checker));
2055                         else
2056                                 LOG_MSG(2, checker_message(&pp->checker));
2057                 }
2058         }
2059
2060         pp->state = newstate;
2061         repair_path(pp);
2062
2063         if (pp->mpp->wait_for_udev)
2064                 return 1;
2065         /*
2066          * path prio refreshing
2067          */
2068         condlog(4, "path prio refresh");
2069
2070         if (update_prio(pp, new_path_up) &&
2071             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2072              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2073                 update_path_groups(pp->mpp, vecs, !new_path_up);
2074         else if (need_switch_pathgroup(pp->mpp, 0)) {
2075                 if (pp->mpp->pgfailback > 0 &&
2076                     (new_path_up || pp->mpp->failback_tick <= 0))
2077                         pp->mpp->failback_tick =
2078                                 pp->mpp->pgfailback + 1;
2079                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2080                          (chkr_new_path_up && followover_should_failback(pp)))
2081                         switch_pathgroup(pp->mpp);
2082         }
2083         return 1;
2084 }
2085
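/*
 * Reset every path's check interval to the configured checkint.
 */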
2086 static void init_path_check_interval(struct vectors *vecs)
2087 {
2088         struct config *conf;
2089         struct path *pp;
2090         unsigned int i;
2091
2092         vector_foreach_slot (vecs->pathvec, pp, i) {
2093                 conf = get_multipath_config();
2094                 pp->checkint = conf->checkint;
2095                 put_multipath_config(conf);
2096         }
2097 }
2098
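/*
 * Main path checker thread. Roughly once a second (or on a strict
 * schedule if strict_timing is set) it checks all paths, runs the
 * periodic map timers and the map garbage collector, and warns when a
 * full pass takes longer than max_checkint. The thread registers
 * itself with RCU because it dereferences the configuration.
 */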
2099 static void *
2100 checkerloop (void *ap)
2101 {
2102         struct vectors *vecs;
2103         struct path *pp;
2104         int count = 0;
2105         unsigned int i;
2106         struct timespec last_time;
2107         struct config *conf;
2108
2109         pthread_cleanup_push(rcu_unregister, NULL);
2110         rcu_register_thread();
2111         mlockall(MCL_CURRENT | MCL_FUTURE);
2112         vecs = (struct vectors *)ap;
2113         condlog(2, "path checkers start up");
2114
2115         /* Tweak start time for initial path check */
2116         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2117                 last_time.tv_sec = 0;
2118         else
2119                 last_time.tv_sec -= 1;
2120
2121         while (1) {
2122                 struct timespec diff_time, start_time, end_time;
2123                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2124
2125                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2126                         start_time.tv_sec = 0;
2127                 if (start_time.tv_sec && last_time.tv_sec) {
2128                         timespecsub(&start_time, &last_time, &diff_time);
2129                         condlog(4, "tick (%lu.%06lu secs)",
2130                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2131                         last_time = start_time;
2132                         ticks = diff_time.tv_sec;
2133                 } else {
2134                         ticks = 1;
2135                         condlog(4, "tick (%d ticks)", ticks);
2136                 }
2137 #ifdef USE_SYSTEMD
2138                 if (use_watchdog)
2139                         sd_notify(0, "WATCHDOG=1");
2140 #endif
2141                 rc = set_config_state(DAEMON_RUNNING);
2142                 if (rc == ETIMEDOUT) {
2143                         condlog(4, "timeout waiting for DAEMON_IDLE");
2144                         continue;
2145                 }
2146
2147                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2148                 lock(&vecs->lock);
2149                 pthread_testcancel();
2150                 vector_foreach_slot (vecs->pathvec, pp, i) {
2151                         rc = check_path(vecs, pp, ticks);
2152                         if (rc < 0) {
2153                                 vector_del_slot(vecs->pathvec, i);
2154                                 free_path(pp);
2155                                 i--;
2156                         } else
2157                                 num_paths += rc;
2158                 }
2159                 lock_cleanup_pop(vecs->lock);
2160
2161                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2162                 lock(&vecs->lock);
2163                 pthread_testcancel();
2164                 defered_failback_tick(vecs->mpvec);
2165                 retry_count_tick(vecs->mpvec);
2166                 missing_uev_wait_tick(vecs);
2167                 ghost_delay_tick(vecs);
2168                 lock_cleanup_pop(vecs->lock);
2169
2170                 if (count)
2171                         count--;
2172                 else {
2173                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2174                         lock(&vecs->lock);
2175                         pthread_testcancel();
2176                         condlog(4, "map garbage collection");
2177                         mpvec_garbage_collector(vecs);
2178                         count = MAPGCINT;
2179                         lock_cleanup_pop(vecs->lock);
2180                 }
2181
2182                 diff_time.tv_nsec = 0;
2183                 if (start_time.tv_sec &&
2184                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2185                         timespecsub(&end_time, &start_time, &diff_time);
2186                         if (num_paths) {
2187                                 unsigned int max_checkint;
2188
2189                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2190                                         num_paths, num_paths > 1 ? "s" : "",
2191                                         diff_time.tv_sec,
2192                                         diff_time.tv_nsec / 1000);
2193                                 conf = get_multipath_config();
2194                                 max_checkint = conf->max_checkint;
2195                                 put_multipath_config(conf);
2196                                 if (diff_time.tv_sec > max_checkint)
2197                                         condlog(1, "path checkers took longer "
2198                                                 "than %lu seconds, consider "
2199                                                 "increasing max_polling_interval",
2200                                                 diff_time.tv_sec);
2201                         }
2202                 }
2203                 check_foreign();
2204                 post_config_state(DAEMON_IDLE);
2205                 conf = get_multipath_config();
2206                 strict_timing = conf->strict_timing;
2207                 put_multipath_config(conf);
2208                 if (!strict_timing)
2209                         sleep(1);
2210                 else {
2211                         if (diff_time.tv_nsec) {
2212                                 diff_time.tv_sec = 0;
2213                                 diff_time.tv_nsec =
2214                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2215                         } else
2216                                 diff_time.tv_sec = 1;
2217
2218                         condlog(3, "waiting for %lu.%06lu secs",
2219                                 diff_time.tv_sec,
2220                                 diff_time.tv_nsec / 1000);
2221                         if (nanosleep(&diff_time, NULL) != 0) {
2222                                 condlog(3, "nanosleep failed with error %d",
2223                                         errno);
2224                                 conf = get_multipath_config();
2225                                 conf->strict_timing = 0;
2226                                 put_multipath_config(conf);
2227                                 break;
2228                         }
2229                 }
2230         }
2231         pthread_cleanup_pop(1);
2232         return NULL;
2233 }
2234
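/*
 * (Re)build the set of multipath maps: discover paths and maps, filter
 * out blacklisted paths, coalesce the remaining paths into maps, push
 * the result into device-mapper and start the event waiter threads.
 */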
2235 int
2236 configure (struct vectors * vecs)
2237 {
2238         struct multipath * mpp;
2239         struct path * pp;
2240         vector mpvec;
2241         int i, ret;
2242         struct config *conf;
2243         static int force_reload = FORCE_RELOAD_WEAK;
2244
2245         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2246                 condlog(0, "couldn't allocate path vec in configure");
2247                 return 1;
2248         }
2249
2250         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2251                 condlog(0, "couldn't allocate multipath vec in configure");
2252                 return 1;
2253         }
2254
2255         if (!(mpvec = vector_alloc())) {
2256                 condlog(0, "couldn't allocate new maps vec in configure");
2257                 return 1;
2258         }
2259
2260         /*
2261          * probe for current path (from sysfs) and map (from dm) sets
2262          */
2263         ret = path_discovery(vecs->pathvec, DI_ALL);
2264         if (ret < 0) {
2265                 condlog(0, "configure failed at path discovery");
2266                 return 1;
2267         }
2268
2269         vector_foreach_slot (vecs->pathvec, pp, i){
2270                 conf = get_multipath_config();
2271                 if (filter_path(conf, pp) > 0){
2272                         vector_del_slot(vecs->pathvec, i);
2273                         free_path(pp);
2274                         i--;
2275                 }
2276                 else
2277                         pp->checkint = conf->checkint;
2278                 put_multipath_config(conf);
2279         }
2280         if (map_discovery(vecs)) {
2281                 condlog(0, "configure failed at map discovery");
2282                 return 1;
2283         }
2284
2285         /*
2286          * create new set of maps & push changed ones into dm
2287          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2288          * superfluous ACT_RELOAD ioctls. Later calls are done
2289          * with FORCE_RELOAD_YES.
2290          */
2291         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2292         if (force_reload == FORCE_RELOAD_WEAK)
2293                 force_reload = FORCE_RELOAD_YES;
2294         if (ret) {
2295                 condlog(0, "configure failed while coalescing paths");
2296                 return 1;
2297         }
2298
2299         /*
2300          * may need to remove some maps which are no longer relevant
2301          * e.g., due to blacklist changes in conf file
2302          */
2303         if (coalesce_maps(vecs, mpvec)) {
2304                 condlog(0, "configure failed while coalescing maps");
2305                 return 1;
2306         }
2307
2308         dm_lib_release();
2309
2310         sync_maps_state(mpvec);
2311         vector_foreach_slot(mpvec, mpp, i){
2312                 remember_wwid(mpp->wwid);
2313                 update_map_pr(mpp);
2314         }
2315
2316         /*
2317          * purge dm of old maps
2318          */
2319         remove_maps(vecs);
2320
2321         /*
2322          * save new set of maps formed by considering current path state
2323          */
2324         vector_free(vecs->mpvec);
2325         vecs->mpvec = mpvec;
2326
2327         /*
2328          * start dm event waiter threads for these new maps
2329          */
2330         vector_foreach_slot(vecs->mpvec, mpp, i) {
2331                 if (wait_for_events(mpp, vecs)) {
2332                         remove_map(mpp, vecs, 1);
2333                         i--;
2334                         continue;
2335                 }
2336                 if (setup_multipath(vecs, mpp))
2337                         i--;
2338         }
2339         return 0;
2340 }
2341
2342 int
2343 need_to_delay_reconfig(struct vectors * vecs)
2344 {
2345         struct multipath *mpp;
2346         int i;
2347
2348         if (!VECTOR_SIZE(vecs->mpvec))
2349                 return 0;
2350
2351         vector_foreach_slot(vecs->mpvec, mpp, i) {
2352                 if (mpp->wait_for_udev)
2353                         return 1;
2354         }
2355         return 0;
2356 }
2357
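/*
 * RCU callback: free an old struct config once no reader can still
 * reference it.
 */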
2358 void rcu_free_config(struct rcu_head *head)
2359 {
2360         struct config *conf = container_of(head, struct config, rcu);
2361
2362         free_config(conf);
2363 }
2364
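/*
 * Reload DEFAULT_CONFIGFILE, drop the old map and path vectors (they
 * were built with the old settings), publish the new configuration via
 * RCU and run configure() again.
 */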
2365 int
2366 reconfigure (struct vectors * vecs)
2367 {
2368         struct config * old, *conf;
2369
2370         conf = load_config(DEFAULT_CONFIGFILE);
2371         if (!conf)
2372                 return 1;
2373
2374         /*
2375          * free old map and path vectors ... they use old conf state
2376          */
2377         if (VECTOR_SIZE(vecs->mpvec))
2378                 remove_maps_and_stop_waiters(vecs);
2379
2380         free_pathvec(vecs->pathvec, FREE_PATHS);
2381         vecs->pathvec = NULL;
2382         delete_all_foreign();
2383
2384         /* Re-read any timezone changes */
2385         tzset();
2386
2387         dm_tgt_version(conf->version, TGT_MPATH);
2388         if (verbosity)
2389                 conf->verbosity = verbosity;
2390         if (bindings_read_only)
2391                 conf->bindings_read_only = bindings_read_only;
2392         if (conf->find_multipaths) {
2393                 condlog(2, "find_multipaths is set: -n is implied");
2394                 ignore_new_devs = 1;
2395         }
2396         if (ignore_new_devs)
2397                 conf->ignore_new_devs = ignore_new_devs;
2398         uxsock_timeout = conf->uxsock_timeout;
2399
2400         old = rcu_dereference(multipath_conf);
2401         rcu_assign_pointer(multipath_conf, conf);
2402         call_rcu(&old->rcu, rcu_free_config);
2403
2404         configure(vecs);
2405
2406
2407         return 0;
2408 }
2409
2410 static struct vectors *
2411 init_vecs (void)
2412 {
2413         struct vectors * vecs;
2414
2415         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2416
2417         if (!vecs)
2418                 return NULL;
2419
2420         pthread_mutex_init(&vecs->lock.mutex, NULL);
2421
2422         return vecs;
2423 }
2424
2425 static void *
2426 signal_set(int signo, void (*func) (int))
2427 {
2428         int r;
2429         struct sigaction sig;
2430         struct sigaction osig;
2431
2432         sig.sa_handler = func;
2433         sigemptyset(&sig.sa_mask);
2434         sig.sa_flags = 0;
2435
2436         r = sigaction(signo, &sig, &osig);
2437
2438         if (r < 0)
2439                 return (SIG_ERR);
2440         else
2441                 return (osig.sa_handler);
2442 }
2443
2444 void
2445 handle_signals(bool nonfatal)
2446 {
2447         if (exit_sig) {
2448                 condlog(2, "exit (signal)");
2449                 exit_sig = 0;
2450                 exit_daemon();
2451         }
2452         if (!nonfatal)
2453                 return;
2454         if (reconfig_sig) {
2455                 condlog(2, "reconfigure (signal)");
2456                 set_config_state(DAEMON_CONFIGURE);
2457         }
2458         if (log_reset_sig) {
2459                 condlog(2, "reset log (signal)");
2460                 if (logsink == 1)
2461                         log_thread_reset();
2462         }
2463         reconfig_sig = 0;
2464         log_reset_sig = 0;
2465 }
2466
2467 static void
2468 sighup (int sig)
2469 {
2470         reconfig_sig = 1;
2471 }
2472
2473 static void
2474 sigend (int sig)
2475 {
2476         exit_sig = 1;
2477 }
2478
2479 static void
2480 sigusr1 (int sig)
2481 {
2482         log_reset_sig = 1;
2483 }
2484
2485 static void
2486 sigusr2 (int sig)
2487 {
2488         condlog(3, "SIGUSR2 received");
2489 }
2490
2491 static void
2492 signal_init(void)
2493 {
2494         sigset_t set;
2495
2496         /* block all signals */
2497         sigfillset(&set);
2498         /* SIGPIPE occurs if logging fails */
2499         sigdelset(&set, SIGPIPE);
2500         pthread_sigmask(SIG_SETMASK, &set, NULL);
2501
2502         /* Other signals will be unblocked in the uxlsnr thread */
2503         signal_set(SIGHUP, sighup);
2504         signal_set(SIGUSR1, sigusr1);
2505         signal_set(SIGUSR2, sigusr2);
2506         signal_set(SIGINT, sigend);
2507         signal_set(SIGTERM, sigend);
2508         signal_set(SIGPIPE, sigend);
2509 }
2510
2511 static void
2512 setscheduler (void)
2513 {
2514         int res;
2515         static struct sched_param sched_param = {
2516                 .sched_priority = 99
2517         };
2518
2519         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2520
2521         if (res == -1)
2522                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2523         return;
2524 }
2525
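/*
 * Make the daemon as unattractive as possible to the OOM killer, unless
 * systemd already provided OOMScoreAdjust. Try the current
 * /proc/self/oom_score_adj interface first and fall back to the legacy
 * /proc/self/oom_adj.
 */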
2526 static void
2527 set_oom_adj (void)
2528 {
2529 #ifdef OOM_SCORE_ADJ_MIN
2530         int retry = 1;
2531         char *file = "/proc/self/oom_score_adj";
2532         int score = OOM_SCORE_ADJ_MIN;
2533 #else
2534         int retry = 0;
2535         char *file = "/proc/self/oom_adj";
2536         int score = OOM_ADJUST_MIN;
2537 #endif
2538         FILE *fp;
2539         struct stat st;
2540         char *envp;
2541
2542         envp = getenv("OOMScoreAdjust");
2543         if (envp) {
2544                 condlog(3, "Using systemd provided OOMScoreAdjust");
2545                 return;
2546         }
2547         do {
2548                 if (stat(file, &st) == 0){
2549                         fp = fopen(file, "w");
2550                         if (!fp) {
2551                                 condlog(0, "couldn't fopen %s : %s", file,
2552                                         strerror(errno));
2553                                 return;
2554                         }
2555                         fprintf(fp, "%i", score);
2556                         fclose(fp);
2557                         return;
2558                 }
2559                 if (errno != ENOENT) {
2560                         condlog(0, "couldn't stat %s : %s", file,
2561                                 strerror(errno));
2562                         return;
2563                 }
2564 #ifdef OOM_ADJUST_MIN
2565                 file = "/proc/self/oom_adj";
2566                 score = OOM_ADJUST_MIN;
2567 #else
2568                 retry = 0;
2569 #endif
2570         } while (retry--);
2571         condlog(0, "couldn't adjust oom score");
2572 }
2573
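/*
 * Body of the daemon: set up logging, load the configuration, spawn the
 * uevent listener, cli listener, dmevents waiter (if enabled), checker
 * and uevent dispatcher threads, then loop servicing DAEMON_CONFIGURE
 * requests until shutdown, and finally tear everything down.
 */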
2574 static int
2575 child (void * param)
2576 {
2577         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2578         pthread_attr_t log_attr, misc_attr, uevent_attr;
2579         struct vectors * vecs;
2580         struct multipath * mpp;
2581         int i;
2582 #ifdef USE_SYSTEMD
2583         unsigned long checkint;
2584         int startup_done = 0;
2585 #endif
2586         int rc;
2587         int pid_fd = -1;
2588         struct config *conf;
2589         char *envp;
2590
2591         mlockall(MCL_CURRENT | MCL_FUTURE);
2592         signal_init();
2593         rcu_init();
2594
2595         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2596         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2597         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2598         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2599
2600         if (logsink == 1) {
2601                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2602                 log_thread_start(&log_attr);
2603                 pthread_attr_destroy(&log_attr);
2604         }
2605         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2606         if (pid_fd < 0) {
2607                 condlog(1, "failed to create pidfile");
2608                 if (logsink == 1)
2609                         log_thread_stop();
2610                 exit(1);
2611         }
2612
2613         post_config_state(DAEMON_START);
2614
2615         condlog(2, "--------start up--------");
2616         condlog(2, "read " DEFAULT_CONFIGFILE);
2617
2618         conf = load_config(DEFAULT_CONFIGFILE);
2619         if (!conf)
2620                 goto failed;
2621
2622         if (verbosity)
2623                 conf->verbosity = verbosity;
2624         if (bindings_read_only)
2625                 conf->bindings_read_only = bindings_read_only;
2626         if (ignore_new_devs)
2627                 conf->ignore_new_devs = ignore_new_devs;
2628         uxsock_timeout = conf->uxsock_timeout;
2629         rcu_assign_pointer(multipath_conf, conf);
2630         if (init_checkers(conf->multipath_dir)) {
2631                 condlog(0, "failed to initialize checkers");
2632                 goto failed;
2633         }
2634         if (init_prio(conf->multipath_dir)) {
2635                 condlog(0, "failed to initialize prioritizers");
2636                 goto failed;
2637         }
2638         /* Failing this is non-fatal */
2639
2640         init_foreign(conf->multipath_dir);
2641
2642         if (poll_dmevents)
2643                 poll_dmevents = dmevent_poll_supported();
2644         setlogmask(LOG_UPTO(conf->verbosity + 3));
2645
2646         envp = getenv("LimitNOFILE");
2647
2648         if (envp) {
2649                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2650         } else if (conf->max_fds) {
2651                 struct rlimit fd_limit;
2652
2653                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2654                         condlog(0, "can't get open fds limit: %s",
2655                                 strerror(errno));
2656                         fd_limit.rlim_cur = 0;
2657                         fd_limit.rlim_max = 0;
2658                 }
2659                 if (fd_limit.rlim_cur < conf->max_fds) {
2660                         fd_limit.rlim_cur = conf->max_fds;
2661                         if (fd_limit.rlim_max < conf->max_fds)
2662                                 fd_limit.rlim_max = conf->max_fds;
2663                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2664                                 condlog(0, "can't set open fds limit to "
2665                                         "%lu/%lu : %s",
2666                                         fd_limit.rlim_cur, fd_limit.rlim_max,
2667                                         strerror(errno));
2668                         } else {
2669                                 condlog(3, "set open fds limit to %lu/%lu",
2670                                         fd_limit.rlim_cur, fd_limit.rlim_max);
2671                         }
2672                 }
2673
2674         }
2675
2676         vecs = gvecs = init_vecs();
2677         if (!vecs)
2678                 goto failed;
2679
2680         setscheduler();
2681         set_oom_adj();
2682
2683 #ifdef USE_SYSTEMD
2684         envp = getenv("WATCHDOG_USEC");
2685         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2686                 /* Value is in microseconds */
2687                 conf->max_checkint = checkint / 1000000;
2688                 /* Rescale checkint */
2689                 if (conf->checkint > conf->max_checkint)
2690                         conf->checkint = conf->max_checkint;
2691                 else
2692                         conf->checkint = conf->max_checkint / 4;
2693                 condlog(3, "enabling watchdog, interval %d max %d",
2694                         conf->checkint, conf->max_checkint);
2695                 use_watchdog = conf->checkint;
2696         }
2697 #endif
2698         /*
2699          * Startup done, invalidate configuration
2700          */
2701         conf = NULL;
2702
2703         /*
2704          * Signal start of configuration
2705          */
2706         post_config_state(DAEMON_CONFIGURE);
2707
2708         init_path_check_interval(vecs);
2709
2710         if (poll_dmevents) {
2711                 if (init_dmevent_waiter(vecs)) {
2712                         condlog(0, "failed to allocate dmevents waiter info");
2713                         goto failed;
2714                 }
2715                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2716                                          wait_dmevents, NULL))) {
2717                         condlog(0, "failed to create dmevent waiter thread: %d",
2718                                 rc);
2719                         goto failed;
2720                 }
2721         }
2722
2723         /*
2724          * Start uevent listener early to catch events
2725          */
2726         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2727                 condlog(0, "failed to create uevent thread: %d", rc);
2728                 goto failed;
2729         }
2730         pthread_attr_destroy(&uevent_attr);
2731         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2732                 condlog(0, "failed to create cli listener: %d", rc);
2733                 goto failed;
2734         }
2735
2736         /*
2737          * start threads
2738          */
2739         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2740                 condlog(0,"failed to create checker loop thread: %d", rc);
2741                 goto failed;
2742         }
2743         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2744                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2745                 goto failed;
2746         }
2747         pthread_attr_destroy(&misc_attr);
2748
2749         while (running_state != DAEMON_SHUTDOWN) {
2750                 pthread_cleanup_push(config_cleanup, NULL);
2751                 pthread_mutex_lock(&config_lock);
2752                 if (running_state != DAEMON_CONFIGURE &&
2753                     running_state != DAEMON_SHUTDOWN) {
2754                         pthread_cond_wait(&config_cond, &config_lock);
2755                 }
2756                 pthread_cleanup_pop(1);
2757                 if (running_state == DAEMON_CONFIGURE) {
2758                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2759                         lock(&vecs->lock);
2760                         pthread_testcancel();
2761                         if (!need_to_delay_reconfig(vecs)) {
2762                                 reconfigure(vecs);
2763                         } else {
2764                                 conf = get_multipath_config();
2765                                 conf->delayed_reconfig = 1;
2766                                 put_multipath_config(conf);
2767                         }
2768                         lock_cleanup_pop(vecs->lock);
2769                         post_config_state(DAEMON_IDLE);
2770 #ifdef USE_SYSTEMD
2771                         if (!startup_done) {
2772                                 sd_notify(0, "READY=1");
2773                                 startup_done = 1;
2774                         }
2775 #endif
2776                 }
2777         }
2778
2779         lock(&vecs->lock);
2780         conf = get_multipath_config();
2781         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2782                 vector_foreach_slot(vecs->mpvec, mpp, i)
2783                         dm_queue_if_no_path(mpp->alias, 0);
2784         put_multipath_config(conf);
2785         remove_maps_and_stop_waiters(vecs);
2786         unlock(&vecs->lock);
2787
2788         pthread_cancel(check_thr);
2789         pthread_cancel(uevent_thr);
2790         pthread_cancel(uxlsnr_thr);
2791         pthread_cancel(uevq_thr);
2792         if (poll_dmevents)
2793                 pthread_cancel(dmevent_thr);
2794
2795         pthread_join(check_thr, NULL);
2796         pthread_join(uevent_thr, NULL);
2797         pthread_join(uxlsnr_thr, NULL);
2798         pthread_join(uevq_thr, NULL);
2799         if (poll_dmevents)
2800                 pthread_join(dmevent_thr, NULL);
2801
2802         stop_io_err_stat_thread();
2803
2804         lock(&vecs->lock);
2805         free_pathvec(vecs->pathvec, FREE_PATHS);
2806         vecs->pathvec = NULL;
2807         unlock(&vecs->lock);
2808
2809         pthread_mutex_destroy(&vecs->lock.mutex);
2810         FREE(vecs);
2811         vecs = NULL;
2812
2813         cleanup_foreign();
2814         cleanup_checkers();
2815         cleanup_prio();
2816         if (poll_dmevents)
2817                 cleanup_dmevent_waiter();
2818
2819         dm_lib_release();
2820         dm_lib_exit();
2821
2822         /* We're done here */
2823         condlog(3, "unlink pidfile");
2824         unlink(DEFAULT_PIDFILE);
2825
2826         condlog(2, "--------shut down-------");
2827
2828         if (logsink == 1)
2829                 log_thread_stop();
2830
2831         /*
2832          * Freeing config must be done after condlog() and dm_lib_exit(),
2833          * because logging functions like dlog() and dm_write_log()
2834          * reference the config.
2835          */
2836         conf = rcu_dereference(multipath_conf);
2837         rcu_assign_pointer(multipath_conf, NULL);
2838         call_rcu(&conf->rcu, rcu_free_config);
2839         udev_unref(udev);
2840         udev = NULL;
2841         pthread_attr_destroy(&waiter_attr);
2842         pthread_attr_destroy(&io_err_stat_attr);
2843 #ifdef _DEBUG_
2844         dbg_free_final(NULL);
2845 #endif
2846
2847 #ifdef USE_SYSTEMD
2848         sd_notify(0, "ERRNO=0");
2849 #endif
2850         exit(0);
2851
2852 failed:
2853 #ifdef USE_SYSTEMD
2854         sd_notify(0, "ERRNO=1");
2855 #endif
2856         if (pid_fd >= 0)
2857                 close(pid_fd);
2858         exit(1);
2859 }
2860
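/*
 * Classic double fork with setsid() in between, so the daemon cannot
 * reacquire a controlling terminal; stdin, stdout and stderr are
 * redirected to /dev/null. Returns 0 in the daemonized child, the
 * first child's pid in the original parent, or -1 on error.
 */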
2861 static int
2862 daemonize(void)
2863 {
2864         int pid;
2865         int dev_null_fd;
2866
2867         if ((pid = fork()) < 0) {
2868                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2869                 return -1;
2870         }
2871         else if (pid != 0)
2872                 return pid;
2873
2874         setsid();
2875
2876         if ((pid = fork()) < 0)
2877                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2878         else if (pid != 0)
2879                 _exit(0);
2880
2881         if (chdir("/") < 0)
2882                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2883
2884         dev_null_fd = open("/dev/null", O_RDWR);
2885         if (dev_null_fd < 0){
2886                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2887                         strerror(errno));
2888                 _exit(0);
2889         }
2890
2891         close(STDIN_FILENO);
2892         if (dup(dev_null_fd) < 0) {
2893                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2894                         strerror(errno));
2895                 _exit(0);
2896         }
2897         close(STDOUT_FILENO);
2898         if (dup(dev_null_fd) < 0) {
2899                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2900                         strerror(errno));
2901                 _exit(0);
2902         }
2903         close(STDERR_FILENO);
2904         if (dup(dev_null_fd) < 0) {
2905                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2906                         strerror(errno));
2907                 _exit(0);
2908         }
2909         close(dev_null_fd);
2910         daemon_pid = getpid();
2911         return 0;
2912 }
2913
2914 int
2915 main (int argc, char *argv[])
2916 {
2917         extern char *optarg;
2918         extern int optind;
2919         int arg;
2920         int err;
2921         int foreground = 0;
2922         struct config *conf;
2923
2924         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2925                                    "Manipulated through RCU");
2926         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2927                 "Suppress complaints about unprotected running_state reads");
2928         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2929                 "Suppress complaints about this scalar variable");
2930
2931         logsink = 1;
2932
2933         if (getuid() != 0) {
2934                 fprintf(stderr, "need to be root\n");
2935                 exit(1);
2936         }
2937
2938         /* make sure we don't lock any path */
2939         if (chdir("/") < 0)
2940                 fprintf(stderr, "can't chdir to root directory : %s\n",
2941                         strerror(errno));
2942         umask(umask(077) | 022);
2943
2944         pthread_cond_init_mono(&config_cond);
2945
2946         udev = udev_new();
2947         libmp_udev_set_sync_support(0);
2948
2949         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2950                 switch(arg) {
2951                 case 'd':
2952                         foreground = 1;
2953                         if (logsink > 0)
2954                                 logsink = 0;
2955                         //debug=1; /* ### comment me out ### */
2956                         break;
2957                 case 'v':
2958                         if (sizeof(optarg) > sizeof(char *) ||
2959                             !isdigit(optarg[0]))
2960                                 exit(1);
2961
2962                         verbosity = atoi(optarg);
2963                         break;
2964                 case 's':
2965                         logsink = -1;
2966                         break;
2967                 case 'k':
2968                         conf = load_config(DEFAULT_CONFIGFILE);
2969                         if (!conf)
2970                                 exit(1);
2971                         if (verbosity)
2972                                 conf->verbosity = verbosity;
2973                         uxsock_timeout = conf->uxsock_timeout;
2974                         uxclnt(optarg, uxsock_timeout + 100);
2975                         free_config(conf);
2976                         exit(0);
2977                 case 'B':
2978                         bindings_read_only = 1;
2979                         break;
2980                 case 'n':
2981                         ignore_new_devs = 1;
2982                         break;
2983                 case 'w':
2984                         poll_dmevents = 0;
2985                         break;
2986                 default:
2987                         fprintf(stderr, "Invalid argument '-%c'\n",
2988                                 optopt);
2989                         exit(1);
2990                 }
2991         }
2992         if (optind < argc) {
2993                 char cmd[CMDSIZE];
2994                 char * s = cmd;
2995                 char * c = s;
2996
2997                 conf = load_config(DEFAULT_CONFIGFILE);
2998                 if (!conf)
2999                         exit(1);
3000                 if (verbosity)
3001                         conf->verbosity = verbosity;
3002                 uxsock_timeout = conf->uxsock_timeout;
3003                 memset(cmd, 0x0, CMDSIZE);
3004                 while (optind < argc) {
3005                         if (strchr(argv[optind], ' '))
3006                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3007                         else
3008                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3009                         optind++;
3010                 }
3011                 c += snprintf(c, s + CMDSIZE - c, "\n");
3012                 uxclnt(s, uxsock_timeout + 100);
3013                 free_config(conf);
3014                 exit(0);
3015         }
3016
3017         if (foreground) {
3018                 if (!isatty(fileno(stdout)))
3019                         setbuf(stdout, NULL);
3020                 err = 0;
3021                 daemon_pid = getpid();
3022         } else
3023                 err = daemonize();
3024
3025         if (err < 0)
3026                 /* error */
3027                 exit(1);
3028         else if (err > 0)
3029                 /* parent dies */
3030                 exit(0);
3031         else
3032                 /* child lives */
3033                 return (child(NULL));
3034 }
3035
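/*
 * Persistent reservation helper, run in a separate thread that
 * registers itself with RCU. Read the keys currently registered with
 * the device; if the map's configured reservation key is among them,
 * register it through this path as well (REGISTER AND IGNORE EXISTING
 * KEY) and set the map's prflag.
 */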
3036 void *  mpath_pr_event_handler_fn (void * pathp )
3037 {
3038         struct multipath * mpp;
3039         int i, ret, isFound;
3040         struct path * pp = (struct path *)pathp;
3041         struct prout_param_descriptor *param;
3042         struct prin_resp *resp;
3043
3044         rcu_register_thread();
3045         mpp = pp->mpp;
3046
3047         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3048         if (!resp){
3049                 condlog(0,"%s Alloc failed for prin response", pp->dev);
3050                 goto out;
3051         }
3052
3053         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3054         if (ret != MPATH_PR_SUCCESS )
3055         {
3056                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
3057                 goto out;
3058         }
3059
3060         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
3061                         resp->prin_descriptor.prin_readkeys.additional_length );
3062
3063         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
3064         {
3065                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3066                 ret = MPATH_PR_SUCCESS;
3067                 goto out;
3068         }
3069         condlog(2, "Multipath  reservation_key: 0x%" PRIx64 " ",
3070                 get_be64(mpp->reservation_key));
3071
3072         isFound = 0;
3073         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
3074         {
3075                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
3076                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
3077                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3078                 {
3079                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3080                         isFound =1;
3081                         break;
3082                 }
3083         }
3084         if (!isFound)
3085         {
3086                 condlog(0, "%s: Either device not registered or ", pp->dev);
3087                 condlog(0, "host is not authorised for registration. Skip path");
3088                 ret = MPATH_PR_OTHER;
3089                 goto out;
3090         }
3091
3092         param = malloc(sizeof(struct prout_param_descriptor));
             /* bail out early if the parameter block cannot be allocated */
             if (!param)
                     goto out;
3093         memset(param, 0, sizeof(struct prout_param_descriptor));
3094         memcpy(param->sa_key, &mpp->reservation_key, 8);
3095         param->num_transportid = 0;
3096
3097         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3098
3099         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3100         if (ret != MPATH_PR_SUCCESS )
3101         {
3102                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
3103         }
3104         mpp->prflag = 1;
3105
3106         free(param);
3107 out:
3108         if (resp)
3109                 free(resp);
3110         rcu_unregister_thread();
3111         return NULL;
3112 }
3113
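/*
 * Handle a persistent reservation event for a path that has come back:
 * if the map has a reservation key configured, run
 * mpath_pr_event_handler_fn() in a helper thread and wait for it to
 * finish. Returns -1 if there is no key or the thread cannot be created.
 */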
3114 int mpath_pr_event_handle(struct path *pp)
3115 {
3116         pthread_t thread;
3117         int rc;
3118         pthread_attr_t attr;
3119         struct multipath * mpp;
3120
3121         mpp = pp->mpp;
3122
3123         if (!get_be64(mpp->reservation_key))
3124                 return -1;
3125
3126         pthread_attr_init(&attr);
3127         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3128
3129         rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
3130         if (rc) {
3131                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3132                 return -1;
3133         }
3134         pthread_attr_destroy(&attr);
3135         rc = pthread_join(thread, NULL);
3136         return 0;
3137 }