1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
68 #include "log.h"
69
70 #include "mpath_cmd.h"
71 #include "mpath_persist.h"
72
73 #include "prioritizers/alua_rtpg.h"
74
75 #include "main.h"
76 #include "pidfile.h"
77 #include "uxlsnr.h"
78 #include "uxclnt.h"
79 #include "cli.h"
80 #include "cli_handlers.h"
81 #include "lock.h"
82 #include "waiter.h"
83 #include "dmevents.h"
84 #include "io_err_stat.h"
86 #include "foreign.h"
87 #include "../third-party/valgrind/drd.h"
88
89 #define FILE_NAME_SIZE 256
90 #define CMDSIZE 160
91
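/*
 * Note: LOG_MSG() relies on a variable "struct path *pp" being in scope at
 * the call site; only the log level (a) and the message (b) are passed in.
 */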
92 #define LOG_MSG(a, b) \
93 do { \
94         if (pp->offline) \
95                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
96         else if (strlen(b)) \
97                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
98 } while(0)
99
100 struct mpath_event_param
101 {
102         char * devname;
103         struct multipath *mpp;
104 };
105
106 int logsink;
107 int uxsock_timeout;
108 int verbosity;
109 int bindings_read_only;
110 int ignore_new_devs;
111 #ifdef NO_DMEVENTS_POLL
112 int poll_dmevents = 0;
113 #else
114 int poll_dmevents = 1;
115 #endif
116 enum daemon_status running_state = DAEMON_INIT;
117 pid_t daemon_pid;
118 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
119 pthread_cond_t config_cond;
120
121 /*
122  * global copy of vecs for use in sig handlers
123  */
124 struct vectors * gvecs;
125
126 struct udev * udev;
127
128 struct config *multipath_conf;
129
130 /* Local variables */
131 static volatile sig_atomic_t exit_sig;
132 static volatile sig_atomic_t reconfig_sig;
133 static volatile sig_atomic_t log_reset_sig;
134
135 const char *
136 daemon_status(void)
137 {
138         switch (running_state) {
139         case DAEMON_INIT:
140                 return "init";
141         case DAEMON_START:
142                 return "startup";
143         case DAEMON_CONFIGURE:
144                 return "configure";
145         case DAEMON_IDLE:
146                 return "idle";
147         case DAEMON_RUNNING:
148                 return "running";
149         case DAEMON_SHUTDOWN:
150                 return "shutdown";
151         }
152         return NULL;
153 }
154
155 /*
156  * I love you too, systemd ...
157  */
158 const char *
159 sd_notify_status(void)
160 {
161         switch (running_state) {
162         case DAEMON_INIT:
163                 return "STATUS=init";
164         case DAEMON_START:
165                 return "STATUS=startup";
166         case DAEMON_CONFIGURE:
167                 return "STATUS=configure";
168         case DAEMON_IDLE:
169         case DAEMON_RUNNING:
170                 return "STATUS=up";
171         case DAEMON_SHUTDOWN:
172                 return "STATUS=shutdown";
173         }
174         return NULL;
175 }
176
177 #ifdef USE_SYSTEMD
178 static void do_sd_notify(enum daemon_status old_state)
179 {
180         /*
181          * Checkerloop switches back and forth between idle and running state.
182          * No need to tell systemd each time.
183          * These notifications cause a lot of overhead on dbus.
184          */
185         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
186             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
187                 return;
188         sd_notify(0, sd_notify_status());
189 }
190 #endif
191
192 static void config_cleanup(void *arg)
193 {
194         pthread_mutex_unlock(&config_lock);
195 }
196
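/*
 * Set the daemon state unconditionally, wake up all threads waiting on
 * config_cond and, if built with systemd support, update the unit status.
 */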
197 void post_config_state(enum daemon_status state)
198 {
199         pthread_mutex_lock(&config_lock);
200         if (state != running_state) {
201                 enum daemon_status old_state = running_state;
202
203                 running_state = state;
204                 pthread_cond_broadcast(&config_cond);
205 #ifdef USE_SYSTEMD
206                 do_sd_notify(old_state);
207 #endif
208         }
209         pthread_mutex_unlock(&config_lock);
210 }
211
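/*
 * Request a state change. If a previous change has not been picked up yet
 * (the daemon is not DAEMON_IDLE), wait up to one second for it first.
 * Returns 0 on success, or the pthread_cond_timedwait() error code.
 */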
212 int set_config_state(enum daemon_status state)
213 {
214         int rc = 0;
215
216         pthread_cleanup_push(config_cleanup, NULL);
217         pthread_mutex_lock(&config_lock);
218         if (running_state != state) {
219                 enum daemon_status old_state = running_state;
220
221                 if (running_state != DAEMON_IDLE) {
222                         struct timespec ts;
223
224                         clock_gettime(CLOCK_MONOTONIC, &ts);
225                         ts.tv_sec += 1;
226                         rc = pthread_cond_timedwait(&config_cond,
227                                                     &config_lock, &ts);
228                 }
229                 if (!rc) {
230                         running_state = state;
231                         pthread_cond_broadcast(&config_cond);
232 #ifdef USE_SYSTEMD
233                         do_sd_notify(old_state);
234 #endif
235                 }
236         }
237         pthread_cleanup_pop(1);
238         return rc;
239 }
240
241 struct config *get_multipath_config(void)
242 {
243         rcu_read_lock();
244         return rcu_dereference(multipath_conf);
245 }
246
247 void put_multipath_config(void *arg)
248 {
249         rcu_read_unlock();
250 }
251
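/*
 * Decide whether the map should switch to another path group: optionally
 * refresh the path priorities, recompute the best path group and compare
 * it with the currently selected one. Maps configured for manual failback
 * never switch automatically.
 */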
252 static int
253 need_switch_pathgroup (struct multipath * mpp, int refresh)
254 {
255         struct pathgroup * pgp;
256         struct path * pp;
257         unsigned int i, j;
258         struct config *conf;
259         int bestpg;
260
261         if (!mpp)
262                 return 0;
263
264         /*
265          * Refresh path priority values
266          */
267         if (refresh) {
268                 vector_foreach_slot (mpp->pg, pgp, i) {
269                         vector_foreach_slot (pgp->paths, pp, j) {
270                                 conf = get_multipath_config();
271                                 pthread_cleanup_push(put_multipath_config,
272                                                      conf);
273                                 pathinfo(pp, conf, DI_PRIO);
274                                 pthread_cleanup_pop(1);
275                         }
276                 }
277         }
278
279         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
280                 return 0;
281
282         bestpg = select_path_group(mpp);
283         if (mpp->pgfailback == -FAILBACK_MANUAL)
284                 return 0;
285
286         mpp->bestpg = bestpg;
287         if (mpp->bestpg != mpp->nextpg)
288                 return 1;
289
290         return 0;
291 }
292
293 static void
294 switch_pathgroup (struct multipath * mpp)
295 {
296         mpp->stat_switchgroup++;
297         dm_switchgroup(mpp->alias, mpp->bestpg);
298         condlog(2, "%s: switch to path group #%i",
299                  mpp->alias, mpp->bestpg);
300 }
301
302 static int
303 wait_for_events(struct multipath *mpp, struct vectors *vecs)
304 {
305         if (poll_dmevents)
306                 return watch_dmevents(mpp->alias);
307         else
308                 return start_waiter_thread(mpp, vecs);
309 }
310
311 static void
312 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
313 {
314         /* devices are automatically removed by the dmevent polling code,
315          * so they don't need to be manually removed here */
316         if (!poll_dmevents)
317                 stop_waiter_thread(mpp, vecs);
318         remove_map(mpp, vecs, PURGE_VEC);
319 }
320
321 static void
322 remove_maps_and_stop_waiters(struct vectors *vecs)
323 {
324         int i;
325         struct multipath * mpp;
326
327         if (!vecs)
328                 return;
329
330         if (!poll_dmevents) {
331                 vector_foreach_slot(vecs->mpvec, mpp, i)
332                         stop_waiter_thread(mpp, vecs);
333         }
334         else
335                 unwatch_all_dmevents();
336
337         remove_maps(vecs);
338 }
339
340 static void
341 set_multipath_wwid (struct multipath * mpp)
342 {
343         if (strlen(mpp->wwid))
344                 return;
345
346         dm_get_uuid(mpp->alias, mpp->wwid);
347 }
348
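/*
 * Bring the kernel's queue_if_no_path feature in line with the configured
 * no_path_retry policy, and enter recovery mode (limited retries) when
 * queueing is still active but no usable paths are left.
 */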
349 static void set_no_path_retry(struct multipath *mpp)
350 {
351         char is_queueing = 0;
352
353         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
354         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
355                 is_queueing = 1;
356
357         switch (mpp->no_path_retry) {
358         case NO_PATH_RETRY_UNDEF:
359                 break;
360         case NO_PATH_RETRY_FAIL:
361                 if (is_queueing)
362                         dm_queue_if_no_path(mpp->alias, 0);
363                 break;
364         case NO_PATH_RETRY_QUEUE:
365                 if (!is_queueing)
366                         dm_queue_if_no_path(mpp->alias, 1);
367                 break;
368         default:
369                 if (mpp->nr_active > 0) {
370                         mpp->retry_tick = 0;
371                         dm_queue_if_no_path(mpp->alias, 1);
372                 } else if (is_queueing && mpp->retry_tick == 0)
373                         enter_recovery_mode(mpp);
374                 break;
375         }
376 }
377
378 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
379                       int reset)
380 {
381         if (dm_get_info(mpp->alias, &mpp->dmi)) {
382                 /* Error accessing table */
383                 condlog(3, "%s: cannot access table", mpp->alias);
384                 goto out;
385         }
386
387         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
388                 condlog(0, "%s: failed to setup multipath", mpp->alias);
389                 goto out;
390         }
391
392         if (reset) {
393                 set_no_path_retry(mpp);
394                 if (VECTOR_SIZE(mpp->paths) != 0)
395                         dm_cancel_deferred_remove(mpp);
396         }
397
398         return 0;
399 out:
400         remove_map_and_stop_waiter(mpp, vecs);
401         return 1;
402 }
403
404 int update_multipath (struct vectors *vecs, char *mapname, int reset)
405 {
406         struct multipath *mpp;
407         struct pathgroup  *pgp;
408         struct path *pp;
409         int i, j;
410
411         mpp = find_mp_by_alias(vecs->mpvec, mapname);
412
413         if (!mpp) {
414                 condlog(3, "%s: multipath map not found", mapname);
415                 return 2;
416         }
417
418         if (__setup_multipath(vecs, mpp, reset))
419                 return 1; /* mpp freed in setup_multipath */
420
421         /*
422          * compare checkers states with DM states
423          */
424         vector_foreach_slot (mpp->pg, pgp, i) {
425                 vector_foreach_slot (pgp->paths, pp, j) {
426                         if (pp->dmstate != PSTATE_FAILED)
427                                 continue;
428
429                         if (pp->state != PATH_DOWN) {
430                                 struct config *conf;
431                                 int oldstate = pp->state;
432                                 int checkint;
433
434                                 conf = get_multipath_config();
435                                 checkint = conf->checkint;
436                                 put_multipath_config(conf);
437                                 condlog(2, "%s: mark as failed", pp->dev);
438                                 mpp->stat_path_failures++;
439                                 pp->state = PATH_DOWN;
440                                 if (oldstate == PATH_UP ||
441                                     oldstate == PATH_GHOST)
442                                         update_queue_mode_del_path(mpp);
443
444                                 /*
445                                  * if opportune,
446                                  * schedule the next check earlier
447                                  */
448                                 if (pp->tick > checkint)
449                                         pp->tick = checkint;
450                         }
451                 }
452         }
453         return 0;
454 }
455
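/*
 * Re-adopt the paths of an existing map, rebuild its parameter string and
 * reload it, retrying the reload up to three times. For a newly created
 * map (new_map), also start waiting for events, and remove the map
 * (returning 1) if the reload or the waiter setup failed.
 */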
456 static int
457 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
458 {
459         int retries = 3;
460         char params[PARAMS_SIZE] = {0};
461
462 retry:
463         condlog(4, "%s: updating new map", mpp->alias);
464         if (adopt_paths(vecs->pathvec, mpp)) {
465                 condlog(0, "%s: failed to adopt paths for new map update",
466                         mpp->alias);
467                 retries = -1;
468                 goto fail;
469         }
470         verify_paths(mpp, vecs);
471         mpp->action = ACT_RELOAD;
472
473         extract_hwe_from_path(mpp);
474         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
475                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
476                 retries = -1;
477                 goto fail;
478         }
479         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
480                 condlog(0, "%s: map_update sleep", mpp->alias);
481                 sleep(1);
482                 goto retry;
483         }
484         dm_lib_release();
485
486 fail:
487         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
488                 condlog(0, "%s: failed to create new map", mpp->alias);
489                 remove_map(mpp, vecs, 1);
490                 return 1;
491         }
492
493         if (setup_multipath(vecs, mpp))
494                 return 1;
495
496         sync_map_state(mpp);
497
498         if (retries < 0)
499                 condlog(0, "%s: failed reload in new map update", mpp->alias);
500         return 0;
501 }
502
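/*
 * Register a map that already exists in the kernel but for which no paths
 * are known yet (typically discovered through a dm uevent): fetch its
 * UUID, table and status from device-mapper, add it to vecs->mpvec and
 * update it via update_map().
 */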
503 static struct multipath *
504 add_map_without_path (struct vectors *vecs, const char *alias)
505 {
506         struct multipath * mpp = alloc_multipath();
507         struct config *conf;
508
509         if (!mpp)
510                 return NULL;
511         if (!alias) {
512                 FREE(mpp);
513                 return NULL;
514         }
515
516         mpp->alias = STRDUP(alias);
517
518         if (dm_get_info(mpp->alias, &mpp->dmi)) {
519                 condlog(3, "%s: cannot access table", mpp->alias);
520                 goto out;
521         }
522         set_multipath_wwid(mpp);
523         conf = get_multipath_config();
524         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
525         put_multipath_config(conf);
526
527         if (update_multipath_table(mpp, vecs->pathvec, 1))
528                 goto out;
529         if (update_multipath_status(mpp))
530                 goto out;
531
532         if (!vector_alloc_slot(vecs->mpvec))
533                 goto out;
534
535         vector_set_slot(vecs->mpvec, mpp);
536
537         if (update_map(mpp, vecs, 1) != 0) /* map removed */
538                 return NULL;
539
540         return mpp;
541 out:
542         remove_map(mpp, vecs, PURGE_VEC);
543         return NULL;
544 }
545
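/*
 * Flush all maps in vecs->mpvec whose WWID is not present in the newly
 * built map vector nmpv. Maps that cannot be flushed (e.g. because the
 * device is still open) are moved over to nmpv instead; maps present in
 * both vectors are reassigned when reassign_maps is set.
 */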
546 static int
547 coalesce_maps(struct vectors *vecs, vector nmpv)
548 {
549         struct multipath * ompp;
550         vector ompv = vecs->mpvec;
551         unsigned int i, reassign_maps;
552         struct config *conf;
553
554         conf = get_multipath_config();
555         reassign_maps = conf->reassign_maps;
556         put_multipath_config(conf);
557         vector_foreach_slot (ompv, ompp, i) {
558                 condlog(3, "%s: coalesce map", ompp->alias);
559                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
560                         /*
561                          * remove all current maps not allowed by the
562                          * current configuration
563                          */
564                         if (dm_flush_map(ompp->alias)) {
565                                 condlog(0, "%s: unable to flush devmap",
566                                         ompp->alias);
567                                 /*
568                                  * may be just because the device is open
569                                  */
570                                 if (setup_multipath(vecs, ompp) != 0) {
571                                         i--;
572                                         continue;
573                                 }
574                                 if (!vector_alloc_slot(nmpv))
575                                         return 1;
576
577                                 vector_set_slot(nmpv, ompp);
578
579                                 vector_del_slot(ompv, i);
580                                 i--;
581                         }
582                         else {
583                                 dm_lib_release();
584                                 condlog(2, "%s devmap removed", ompp->alias);
585                         }
586                 } else if (reassign_maps) {
587                         condlog(3, "%s: Reassign existing device-mapper"
588                                 " devices", ompp->alias);
589                         dm_reassign(ompp->alias);
590                 }
591         }
592         return 0;
593 }
594
595 static void
596 sync_maps_state(vector mpvec)
597 {
598         unsigned int i;
599         struct multipath *mpp;
600
601         vector_foreach_slot (mpvec, mpp, i)
602                 sync_map_state(mpp);
603 }
604
605 static int
606 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
607 {
608         int r;
609
610         if (nopaths)
611                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
612         else
613                 r = dm_flush_map(mpp->alias);
614         /*
615          * clear references to this map further down, so we can ignore the
616          * spurious uevent we may have generated with the dm_flush_map call above
617          */
618         if (r) {
619                 /*
620                  * May not really be an error -- if the map was already flushed
621                  * from the device mapper by dmsetup(8) for instance.
622                  */
623                 if (r == 1)
624                         condlog(0, "%s: can't flush", mpp->alias);
625                 else {
626                         condlog(2, "%s: devmap deferred remove", mpp->alias);
627                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
628                 }
629                 return r;
630         }
631         else {
632                 dm_lib_release();
633                 condlog(2, "%s: map flushed", mpp->alias);
634         }
635
636         orphan_paths(vecs->pathvec, mpp);
637         remove_map_and_stop_waiter(mpp, vecs);
638
639         return 0;
640 }
641
642 static int
643 uev_add_map (struct uevent * uev, struct vectors * vecs)
644 {
645         char *alias;
646         int major = -1, minor = -1, rc;
647
648         condlog(3, "%s: add map (uevent)", uev->kernel);
649         alias = uevent_get_dm_name(uev);
650         if (!alias) {
651                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
652                 major = uevent_get_major(uev);
653                 minor = uevent_get_minor(uev);
654                 alias = dm_mapname(major, minor);
655                 if (!alias) {
656                         condlog(2, "%s: mapname not found for %d:%d",
657                                 uev->kernel, major, minor);
658                         return 1;
659                 }
660         }
661         pthread_cleanup_push(cleanup_lock, &vecs->lock);
662         lock(&vecs->lock);
663         pthread_testcancel();
664         rc = ev_add_map(uev->kernel, alias, vecs);
665         lock_cleanup_pop(vecs->lock);
666         FREE(alias);
667         return rc;
668 }
669
670 /*
671  * ev_add_map expects that the multipath device already exists in kernel
672  * before it is called. It just adds a device to multipathd or updates an
673  * existing device.
674  */
675 int
676 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
677 {
678         struct multipath * mpp;
679         int delayed_reconfig, reassign_maps;
680         struct config *conf;
681
682         if (!dm_is_mpath(alias)) {
683                 condlog(4, "%s: not a multipath map", alias);
684                 return 0;
685         }
686
687         mpp = find_mp_by_alias(vecs->mpvec, alias);
688
689         if (mpp) {
690                 if (mpp->wait_for_udev > 1) {
691                         condlog(2, "%s: performing delayed actions",
692                                 mpp->alias);
693                         if (update_map(mpp, vecs, 0))
694                                 /* setup_multipath removed the map */
695                                 return 1;
696                 }
697                 conf = get_multipath_config();
698                 delayed_reconfig = conf->delayed_reconfig;
699                 reassign_maps = conf->reassign_maps;
700                 put_multipath_config(conf);
701                 if (mpp->wait_for_udev) {
702                         mpp->wait_for_udev = 0;
703                         if (delayed_reconfig &&
704                             !need_to_delay_reconfig(vecs)) {
705                                 condlog(2, "reconfigure (delayed)");
706                                 set_config_state(DAEMON_CONFIGURE);
707                                 return 0;
708                         }
709                 }
710                 /*
711                  * Not really an error -- we generate our own uevent
712                  * if we create a multipath mapped device as a result
713                  * of uev_add_path
714                  */
715                 if (reassign_maps) {
716                         condlog(3, "%s: Reassign existing device-mapper devices",
717                                 alias);
718                         dm_reassign(alias);
719                 }
720                 return 0;
721         }
722         condlog(2, "%s: adding map", alias);
723
724         /*
725          * now we can register the map
726          */
727         if ((mpp = add_map_without_path(vecs, alias))) {
728                 sync_map_state(mpp);
729                 condlog(2, "%s: devmap %s registered", alias, dev);
730                 return 0;
731         } else {
732                 condlog(2, "%s: ev_add_map failed", dev);
733                 return 1;
734         }
735 }
736
737 static int
738 uev_remove_map (struct uevent * uev, struct vectors * vecs)
739 {
740         char *alias;
741         int minor;
742         struct multipath *mpp;
743
744         condlog(3, "%s: remove map (uevent)", uev->kernel);
745         alias = uevent_get_dm_name(uev);
746         if (!alias) {
747                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
748                 return 0;
749         }
750         minor = uevent_get_minor(uev);
751
752         pthread_cleanup_push(cleanup_lock, &vecs->lock);
753         lock(&vecs->lock);
754         pthread_testcancel();
755         mpp = find_mp_by_minor(vecs->mpvec, minor);
756
757         if (!mpp) {
758                 condlog(2, "%s: devmap not registered, can't remove",
759                         uev->kernel);
760                 goto out;
761         }
762         if (strcmp(mpp->alias, alias)) {
763                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
764                         mpp->alias, mpp->dmi->minor, minor);
765                 goto out;
766         }
767
768         orphan_paths(vecs->pathvec, mpp);
769         remove_map_and_stop_waiter(mpp, vecs);
770 out:
771         lock_cleanup_pop(vecs->lock);
772         FREE(alias);
773         return 0;
774 }
775
776 /* Called from CLI handler */
777 int
778 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
779 {
780         struct multipath * mpp;
781
782         mpp = find_mp_by_minor(vecs->mpvec, minor);
783
784         if (!mpp) {
785                 condlog(2, "%s: devmap not registered, can't remove",
786                         devname);
787                 return 1;
788         }
789         if (strcmp(mpp->alias, alias)) {
790                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
791                         mpp->alias, mpp->dmi->minor, minor);
792                 return 1;
793         }
794         return flush_map(mpp, vecs, 0);
795 }
796
797 static int
798 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
799 {
800         struct path *pp;
801         int ret = 0, i;
802         struct config *conf;
803
804         condlog(3, "%s: add path (uevent)", uev->kernel);
805         if (strstr(uev->kernel, "..") != NULL) {
806                 /*
807                  * Don't allow relative device names in the pathvec
808                  */
809                 condlog(0, "%s: path name is invalid", uev->kernel);
810                 return 1;
811         }
812
813         pthread_cleanup_push(cleanup_lock, &vecs->lock);
814         lock(&vecs->lock);
815         pthread_testcancel();
816         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
817         if (pp) {
818                 int r;
819
820                 condlog(3, "%s: spurious uevent, path already in pathvec",
821                         uev->kernel);
822                 if (!pp->mpp && !strlen(pp->wwid)) {
823                         condlog(3, "%s: reinitialize path", uev->kernel);
824                         udev_device_unref(pp->udev);
825                         pp->udev = udev_device_ref(uev->udev);
826                         conf = get_multipath_config();
827                         pthread_cleanup_push(put_multipath_config, conf);
828                         r = pathinfo(pp, conf,
829                                      DI_ALL | DI_BLACKLIST);
830                         pthread_cleanup_pop(1);
831                         if (r == PATHINFO_OK)
832                                 ret = ev_add_path(pp, vecs, need_do_map);
833                         else if (r == PATHINFO_SKIPPED) {
834                                 condlog(3, "%s: remove blacklisted path",
835                                         uev->kernel);
836                                 i = find_slot(vecs->pathvec, (void *)pp);
837                                 if (i != -1)
838                                         vector_del_slot(vecs->pathvec, i);
839                                 free_path(pp);
840                         } else {
841                                 condlog(0, "%s: failed to reinitialize path",
842                                         uev->kernel);
843                                 ret = 1;
844                         }
845                 }
846         }
847         lock_cleanup_pop(vecs->lock);
848         if (pp)
849                 return ret;
850
851         /*
852          * get path vital state
853          */
854         conf = get_multipath_config();
855         pthread_cleanup_push(put_multipath_config, conf);
856         ret = alloc_path_with_pathinfo(conf, uev->udev,
857                                        uev->wwid, DI_ALL, &pp);
858         pthread_cleanup_pop(1);
859         if (!pp) {
860                 if (ret == PATHINFO_SKIPPED)
861                         return 0;
862                 condlog(3, "%s: failed to get path info", uev->kernel);
863                 return 1;
864         }
865         pthread_cleanup_push(cleanup_lock, &vecs->lock);
866         lock(&vecs->lock);
867         pthread_testcancel();
868         ret = store_path(vecs->pathvec, pp);
869         if (!ret) {
870                 conf = get_multipath_config();
871                 pp->checkint = conf->checkint;
872                 put_multipath_config(conf);
873                 ret = ev_add_path(pp, vecs, need_do_map);
874         } else {
875                 condlog(0, "%s: failed to store path info, "
876                         "dropping event",
877                         uev->kernel);
878                 free_path(pp);
879                 ret = 1;
880         }
881         lock_cleanup_pop(vecs->lock);
882         return ret;
883 }
884
885 /*
886  * returns:
887  * 0: added
888  * 1: error
889  */
890 int
891 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
892 {
893         struct multipath * mpp;
894         char params[PARAMS_SIZE] = {0};
895         int retries = 3;
896         int start_waiter = 0;
897         int ret;
898
899         /*
900          * need path UID to go any further
901          */
902         if (strlen(pp->wwid) == 0) {
903                 condlog(0, "%s: failed to get path uid", pp->dev);
904                 goto fail; /* leave path added to pathvec */
905         }
906         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
907         if (mpp && mpp->wait_for_udev &&
908             (pathcount(mpp, PATH_UP) > 0 ||
909              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
910               mpp->ghost_delay_tick <= 0))) {
911                 /* if wait_for_udev is set and valid paths exist */
912                 condlog(3, "%s: delaying path addition until %s is fully initialized",
913                         pp->dev, mpp->alias);
914                 mpp->wait_for_udev = 2;
915                 orphan_path(pp, "waiting for create to complete");
916                 return 0;
917         }
918
919         pp->mpp = mpp;
920 rescan:
921         if (mpp) {
922                 if (pp->size && mpp->size != pp->size) {
923                         condlog(0, "%s: failed to add new path %s, "
924                                 "device size mismatch",
925                                 mpp->alias, pp->dev);
926                         int i = find_slot(vecs->pathvec, (void *)pp);
927                         if (i != -1)
928                                 vector_del_slot(vecs->pathvec, i);
929                         free_path(pp);
930                         return 1;
931                 }
932
933                 condlog(4,"%s: adopting all paths for path %s",
934                         mpp->alias, pp->dev);
935                 if (adopt_paths(vecs->pathvec, mpp))
936                         goto fail; /* leave path added to pathvec */
937
938                 verify_paths(mpp, vecs);
939                 mpp->action = ACT_RELOAD;
940                 extract_hwe_from_path(mpp);
941         } else {
942                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
943                         orphan_path(pp, "only one path");
944                         return 0;
945                 }
946                 condlog(4,"%s: creating new map", pp->dev);
947                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
948                         mpp->action = ACT_CREATE;
949                         /*
950                          * We don't depend on ACT_CREATE, as domap will
951                          * set it to ACT_NOTHING when complete.
952                          */
953                         start_waiter = 1;
954                 }
955                 if (!start_waiter)
956                         goto fail; /* leave path added to pathvec */
957         }
958
959         /* persistent reservation check*/
960         mpath_pr_event_handle(pp);
961
962         if (!need_do_map)
963                 return 0;
964
965         if (!dm_map_present(mpp->alias)) {
966                 mpp->action = ACT_CREATE;
967                 start_waiter = 1;
968         }
969         /*
970          * push the map to the device-mapper
971          */
972         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
973                 condlog(0, "%s: failed to setup map for addition of new "
974                         "path %s", mpp->alias, pp->dev);
975                 goto fail_map;
976         }
977         /*
978          * reload the map for the multipath mapped device
979          */
980 retry:
981         ret = domap(mpp, params, 1);
982         if (ret <= 0) {
983                 if (ret < 0 && retries-- > 0) {
984                         condlog(0, "%s: retry domap for addition of new "
985                                 "path %s", mpp->alias, pp->dev);
986                         sleep(1);
987                         goto retry;
988                 }
989                 condlog(0, "%s: failed in domap for addition of new "
990                         "path %s", mpp->alias, pp->dev);
991                 /*
992                  * deal with asynchronous uevents :((
993                  */
994                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
995                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
996                         sleep(1);
997                         update_mpp_paths(mpp, vecs->pathvec);
998                         goto rescan;
999                 }
1000                 else if (mpp->action == ACT_RELOAD)
1001                         condlog(0, "%s: giving up reload", mpp->alias);
1002                 else
1003                         goto fail_map;
1004         }
1005         dm_lib_release();
1006
1007         if ((mpp->action == ACT_CREATE ||
1008              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1009             wait_for_events(mpp, vecs))
1010                         goto fail_map;
1011
1012         /*
1013          * update our state from kernel regardless of create or reload
1014          */
1015         if (setup_multipath(vecs, mpp))
1016                 goto fail; /* if setup_multipath fails, it removes the map */
1017
1018         sync_map_state(mpp);
1019
1020         if (retries >= 0) {
1021                 condlog(2, "%s [%s]: path added to devmap %s",
1022                         pp->dev, pp->dev_t, mpp->alias);
1023                 return 0;
1024         } else
1025                 goto fail;
1026
1027 fail_map:
1028         remove_map(mpp, vecs, 1);
1029 fail:
1030         orphan_path(pp, "failed to add path");
1031         return 1;
1032 }
1033
1034 static int
1035 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1036 {
1037         struct path *pp;
1038         int ret;
1039
1040         condlog(3, "%s: remove path (uevent)", uev->kernel);
1041         delete_foreign(uev->udev);
1042
1043         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1044         lock(&vecs->lock);
1045         pthread_testcancel();
1046         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1047         if (pp)
1048                 ret = ev_remove_path(pp, vecs, need_do_map);
1049         lock_cleanup_pop(vecs->lock);
1050         if (!pp) {
1051                 /* Not an error; path might have been purged earlier */
1052                 condlog(0, "%s: path already removed", uev->kernel);
1053                 return 0;
1054         }
1055         return ret;
1056 }
1057
1058 int
1059 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1060 {
1061         struct multipath * mpp;
1062         int i, retval = 0;
1063         char params[PARAMS_SIZE] = {0};
1064
1065         /*
1066          * avoid referring to the map of an orphaned path
1067          */
1068         if ((mpp = pp->mpp)) {
1069                 /*
1070                  * transform the mp->pg vector of vectors of paths
1071                  * into a mp->params string to feed the device-mapper
1072                  */
1073                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1074                         condlog(0, "%s: failed to update paths",
1075                                 mpp->alias);
1076                         goto fail;
1077                 }
1078
1079                 /*
1080                  * Make sure mpp->hwe doesn't point to freed memory
1081                  * We call extract_hwe_from_path() below to restore mpp->hwe
1082                  */
1083                 if (mpp->hwe == pp->hwe)
1084                         mpp->hwe = NULL;
1085
1086                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1087                         vector_del_slot(mpp->paths, i);
1088
1089                 /*
1090                  * remove the map IF removing the last path
1091                  */
1092                 if (VECTOR_SIZE(mpp->paths) == 0) {
1093                         char alias[WWID_SIZE];
1094
1095                         /*
1096                          * flush_map will fail if the device is open
1097                          */
1098                         strlcpy(alias, mpp->alias, WWID_SIZE);
1099                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1100                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1101                                 mpp->retry_tick = 0;
1102                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1103                                 mpp->disable_queueing = 1;
1104                                 mpp->stat_map_failures++;
1105                                 dm_queue_if_no_path(mpp->alias, 0);
1106                         }
1107                         if (!flush_map(mpp, vecs, 1)) {
1108                                 condlog(2, "%s: removed map after"
1109                                         " removing all paths",
1110                                         alias);
1111                                 retval = 0;
1112                                 goto out;
1113                         }
1114                         /*
1115                          * Not an error, continue
1116                          */
1117                 }
1118
1119                 if (mpp->hwe == NULL)
1120                         extract_hwe_from_path(mpp);
1121
1122                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1123                         condlog(0, "%s: failed to setup map for"
1124                                 " removal of path %s", mpp->alias, pp->dev);
1125                         goto fail;
1126                 }
1127
1128                 if (mpp->wait_for_udev) {
1129                         mpp->wait_for_udev = 2;
1130                         goto out;
1131                 }
1132
1133                 if (!need_do_map)
1134                         goto out;
1135                 /*
1136                  * reload the map
1137                  */
1138                 mpp->action = ACT_RELOAD;
1139                 if (domap(mpp, params, 1) <= 0) {
1140                         condlog(0, "%s: failed in domap for "
1141                                 "removal of path %s",
1142                                 mpp->alias, pp->dev);
1143                         retval = 1;
1144                 } else {
1145                         /*
1146                          * update our state from kernel
1147                          */
1148                         if (setup_multipath(vecs, mpp))
1149                                 return 1;
1150                         sync_map_state(mpp);
1151
1152                         condlog(2, "%s [%s]: path removed from map %s",
1153                                 pp->dev, pp->dev_t, mpp->alias);
1154                 }
1155         }
1156
1157 out:
1158         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1159                 vector_del_slot(vecs->pathvec, i);
1160
1161         free_path(pp);
1162
1163         return retval;
1164
1165 fail:
1166         remove_map_and_stop_waiter(mpp, vecs);
1167         return 1;
1168 }
1169
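/*
 * Handle "change" uevents for path devices: foreign paths are passed to
 * the foreign library, a changed WWID is either tolerated or (with
 * disable_changed_wwids) causes the path to be failed, a changed
 * read-only setting triggers a map reload, and paths still waiting for
 * udev initialization are re-added through uev_add_path().
 */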
1170 static int
1171 uev_update_path (struct uevent *uev, struct vectors * vecs)
1172 {
1173         int ro, retval = 0, rc;
1174         struct path * pp;
1175         struct config *conf;
1176         int disable_changed_wwids;
1177         int needs_reinit = 0;
1178
1179         switch ((rc = change_foreign(uev->udev))) {
1180         case FOREIGN_OK:
1181                 /* known foreign path, ignore event */
1182                 return 0;
1183         case FOREIGN_IGNORED:
1184                 break;
1185         case FOREIGN_ERR:
1186                 condlog(3, "%s: error in change_foreign", __func__);
1187                 break;
1188         default:
1189                 condlog(1, "%s: return code %d of change_foreign is unsupported",
1190                         __func__, rc);
1191                 break;
1192         }
1193
1194         conf = get_multipath_config();
1195         disable_changed_wwids = conf->disable_changed_wwids;
1196         put_multipath_config(conf);
1197
1198         ro = uevent_get_disk_ro(uev);
1199
1200         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1201         lock(&vecs->lock);
1202         pthread_testcancel();
1203
1204         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1205         if (pp) {
1206                 struct multipath *mpp = pp->mpp;
1207                 char wwid[WWID_SIZE];
1208
1209                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1210                         needs_reinit = 1;
1211                         goto out;
1212                 }
1213                 /* Don't deal with other types of failed initialization
1214                  * now. check_path will handle it */
1215                 if (!strlen(pp->wwid))
1216                         goto out;
1217
1218                 strcpy(wwid, pp->wwid);
1219                 get_uid(pp, pp->state, uev->udev);
1220
1221                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1222                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1223                                 uev->kernel, wwid, pp->wwid,
1224                                 (disable_changed_wwids ? "disallowing" :
1225                                  "continuing"));
1226                         strcpy(pp->wwid, wwid);
1227                         if (disable_changed_wwids) {
1228                                 if (!pp->wwid_changed) {
1229                                         pp->wwid_changed = 1;
1230                                         pp->tick = 1;
1231                                         if (pp->mpp)
1232                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1233                                 }
1234                                 goto out;
1235                         }
1236                 } else {
1237                         pp->wwid_changed = 0;
1238                         udev_device_unref(pp->udev);
1239                         pp->udev = udev_device_ref(uev->udev);
1240                         conf = get_multipath_config();
1241                         pthread_cleanup_push(put_multipath_config, conf);
1242                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1243                                 condlog(1, "%s: pathinfo failed after change uevent",
1244                                         uev->kernel);
1245                         pthread_cleanup_pop(1);
1246                 }
1247
1248                 if (mpp && ro >= 0) {
1249                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1250
1251                         if (mpp->wait_for_udev)
1252                                 mpp->wait_for_udev = 2;
1253                         else {
1254                                 if (ro == 1)
1255                                         pp->mpp->force_readonly = 1;
1256                                 retval = reload_map(vecs, mpp, 0, 1);
1257                                 pp->mpp->force_readonly = 0;
1258                                 condlog(2, "%s: map %s reloaded (retval %d)",
1259                                         uev->kernel, mpp->alias, retval);
1260                         }
1261                 }
1262         }
1263 out:
1264         lock_cleanup_pop(vecs->lock);
1265         if (!pp) {
1266                 /* If the path is blacklisted, only log at debug verbosity. */
1267                 if (uev->udev) {
1268                         int flag = DI_SYSFS | DI_WWID;
1269
1270                         conf = get_multipath_config();
1271                         pthread_cleanup_push(put_multipath_config, conf);
1272                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1273                         pthread_cleanup_pop(1);
1274
1275                         if (retval == PATHINFO_SKIPPED) {
1276                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1277                                 return 0;
1278                         }
1279                 }
1280
1281                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1282         }
1283         if (needs_reinit)
1284                 retval = uev_add_path(uev, vecs, 1);
1285         return retval;
1286 }
1287
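/*
 * For dm "change" uevents reporting a PATH_FAILED action, look up the
 * affected path and hand it to the io_err_stat code for path IO error
 * accounting.
 */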
1288 static int
1289 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1290 {
1291         char *action = NULL, *devt = NULL;
1292         struct path *pp;
1293         int r = 1;
1294
1295         action = uevent_get_dm_action(uev);
1296         if (!action)
1297                 return 1;
1298         if (strncmp(action, "PATH_FAILED", 11))
1299                 goto out;
1300         devt = uevent_get_dm_path(uev);
1301         if (!devt) {
1302                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1303                 goto out;
1304         }
1305
1306         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1307         lock(&vecs->lock);
1308         pthread_testcancel();
1309         pp = find_path_by_devt(vecs->pathvec, devt);
1310         if (!pp)
1311                 goto out_lock;
1312         r = io_err_stat_handle_pathfail(pp);
1313         if (r)
1314                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1315                                 pp->dev);
1316 out_lock:
1317         lock_cleanup_pop(vecs->lock);
1318         FREE(devt);
1319         FREE(action);
1320         return r;
1321 out:
1322         FREE(action);
1323         return 1;
1324 }
1325
1326 static int
1327 map_discovery (struct vectors * vecs)
1328 {
1329         struct multipath * mpp;
1330         unsigned int i;
1331
1332         if (dm_get_maps(vecs->mpvec))
1333                 return 1;
1334
1335         vector_foreach_slot (vecs->mpvec, mpp, i)
1336                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1337                     update_multipath_status(mpp)) {
1338                         remove_map(mpp, vecs, 1);
1339                         i--;
1340                 }
1341
1342         return 0;
1343 }
1344
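/*
 * Callback for the unix socket listener. Non-root clients may only issue
 * "list"/"show" commands. Commands are handled by parse_cmd(); when no
 * specific reply was generated, its result is converted into an "ok",
 * "fail" or "timeout" reply.
 */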
1345 int
1346 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1347                 void * trigger_data)
1348 {
1349         struct vectors * vecs;
1350         int r;
1351
1352         *reply = NULL;
1353         *len = 0;
1354         vecs = (struct vectors *)trigger_data;
1355
1356         if ((str != NULL) && (is_root == false) &&
1357             (strncmp(str, "list", strlen("list")) != 0) &&
1358             (strncmp(str, "show", strlen("show")) != 0)) {
1359                 *reply = STRDUP("permission denied: need to be root");
1360                 if (*reply)
1361                         *len = strlen(*reply) + 1;
1362                 return 1;
1363         }
1364
1365         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1366
1367         if (r > 0) {
1368                 if (r == ETIMEDOUT)
1369                         *reply = STRDUP("timeout\n");
1370                 else
1371                         *reply = STRDUP("fail\n");
1372                 if (*reply)
1373                         *len = strlen(*reply) + 1;
1374                 r = 1;
1375         }
1376         else if (!r && *len == 0) {
1377                 *reply = STRDUP("ok\n");
1378                 if (*reply)
1379                         *len = strlen(*reply) + 1;
1380                 r = 0;
1381         }
1382         /* else if (r < 0) leave *reply alone */
1383
1384         return r;
1385 }
1386
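/*
 * Main uevent dispatcher: wait until the daemon is idle or running, then
 * treat "dm-*" events as map events (change -> add/update map plus
 * PATH_FAILED accounting, remove -> remove map) and everything else as
 * path events. Merged path events are processed first without reloading
 * the map; the final event triggers the actual reload.
 */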
1387 int
1388 uev_trigger (struct uevent * uev, void * trigger_data)
1389 {
1390         int r = 0;
1391         struct vectors * vecs;
1392         struct uevent *merge_uev, *tmp;
1393
1394         vecs = (struct vectors *)trigger_data;
1395
1396         pthread_cleanup_push(config_cleanup, NULL);
1397         pthread_mutex_lock(&config_lock);
1398         if (running_state != DAEMON_IDLE &&
1399             running_state != DAEMON_RUNNING)
1400                 pthread_cond_wait(&config_cond, &config_lock);
1401         pthread_cleanup_pop(1);
1402
1403         if (running_state == DAEMON_SHUTDOWN)
1404                 return 0;
1405
1406         /*
1407          * device map event
1408          * Add events are ignored here as the tables
1409          * are not fully initialised then.
1410          */
1411         if (!strncmp(uev->kernel, "dm-", 3)) {
1412                 if (!uevent_is_mpath(uev)) {
1413                         if (!strncmp(uev->action, "change", 6))
1414                                 (void)add_foreign(uev->udev);
1415                         else if (!strncmp(uev->action, "remove", 6))
1416                                 (void)delete_foreign(uev->udev);
1417                         goto out;
1418                 }
1419                 if (!strncmp(uev->action, "change", 6)) {
1420                         r = uev_add_map(uev, vecs);
1421
1422                         /*
1423                          * the kernel-side dm-mpath issues a PATH_FAILED event
1424                          * when it encounters a path IO error. This makes it a
1425                          * reasonable entry point for the path IO error
1426                          * accounting process.
1427                          */
1428                         uev_pathfail_check(uev, vecs);
1429                 } else if (!strncmp(uev->action, "remove", 6)) {
1430                         r = uev_remove_map(uev, vecs);
1431                 }
1432                 goto out;
1433         }
1434
1435         /*
1436          * path add/remove/change event, add/remove maybe merged
1437          */
1438         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1439                 if (!strncmp(merge_uev->action, "add", 3))
1440                         r += uev_add_path(merge_uev, vecs, 0);
1441                 if (!strncmp(merge_uev->action, "remove", 6))
1442                         r += uev_remove_path(merge_uev, vecs, 0);
1443         }
1444
1445         if (!strncmp(uev->action, "add", 3))
1446                 r += uev_add_path(uev, vecs, 1);
1447         if (!strncmp(uev->action, "remove", 6))
1448                 r += uev_remove_path(uev, vecs, 1);
1449         if (!strncmp(uev->action, "change", 6))
1450                 r += uev_update_path(uev, vecs);
1451
1452 out:
1453         return r;
1454 }
1455
1456 static void rcu_unregister(void *param)
1457 {
1458         rcu_unregister_thread();
1459 }
1460
1461 static void *
1462 ueventloop (void * ap)
1463 {
1464         struct udev *udev = ap;
1465
1466         pthread_cleanup_push(rcu_unregister, NULL);
1467         rcu_register_thread();
1468         if (uevent_listen(udev))
1469                 condlog(0, "error starting uevent listener");
1470         pthread_cleanup_pop(1);
1471         return NULL;
1472 }
1473
1474 static void *
1475 uevqloop (void * ap)
1476 {
1477         pthread_cleanup_push(rcu_unregister, NULL);
1478         rcu_register_thread();
1479         if (uevent_dispatch(&uev_trigger, ap))
1480                 condlog(0, "error starting uevent dispatcher");
1481         pthread_cleanup_pop(1);
1482         return NULL;
1483 }
1484 static void *
1485 uxlsnrloop (void * ap)
1486 {
1487         if (cli_init()) {
1488                 condlog(1, "Failed to init uxsock listener");
1489                 return NULL;
1490         }
1491         pthread_cleanup_push(rcu_unregister, NULL);
1492         rcu_register_thread();
1493         set_handler_callback(LIST+PATHS, cli_list_paths);
1494         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1495         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1496         set_handler_callback(LIST+PATH, cli_list_path);
1497         set_handler_callback(LIST+MAPS, cli_list_maps);
1498         set_handler_callback(LIST+STATUS, cli_list_status);
1499         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1500         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1501         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1502         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1503         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1504         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1505         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1506         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1507         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1508         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1509         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1510         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1511         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1512         set_handler_callback(LIST+CONFIG, cli_list_config);
1513         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1514         set_handler_callback(LIST+DEVICES, cli_list_devices);
1515         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1516         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1517         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1518         set_handler_callback(ADD+PATH, cli_add_path);
1519         set_handler_callback(DEL+PATH, cli_del_path);
1520         set_handler_callback(ADD+MAP, cli_add_map);
1521         set_handler_callback(DEL+MAP, cli_del_map);
1522         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1523         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1524         set_handler_callback(SUSPEND+MAP, cli_suspend);
1525         set_handler_callback(RESUME+MAP, cli_resume);
1526         set_handler_callback(RESIZE+MAP, cli_resize);
1527         set_handler_callback(RELOAD+MAP, cli_reload);
1528         set_handler_callback(RESET+MAP, cli_reassign);
1529         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1530         set_handler_callback(FAIL+PATH, cli_fail);
1531         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1532         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1533         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1534         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1535         set_unlocked_handler_callback(QUIT, cli_quit);
1536         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1537         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1538         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1539         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1540         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1541         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1542         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1543         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1544         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1545
1546         umask(077);
1547         uxsock_listen(&uxsock_trigger, ap);
1548         pthread_cleanup_pop(1);
1549         return NULL;
1550 }
1551
1552 void
1553 exit_daemon (void)
1554 {
1555         post_config_state(DAEMON_SHUTDOWN);
1556 }
1557
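/*
 * Added note (editor): fail_path() records a checker-detected path failure
 * in the kernel map via dm_fail_path(). If del_active is set, it also
 * updates the map's queueing-mode accounting for the lost active path.
 */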
1558 static void
1559 fail_path (struct path * pp, int del_active)
1560 {
1561         if (!pp->mpp)
1562                 return;
1563
1564         condlog(2, "checker failed path %s in map %s",
1565                  pp->dev_t, pp->mpp->alias);
1566
1567         dm_fail_path(pp->mpp->alias, pp->dev_t);
1568         if (del_active)
1569                 update_queue_mode_del_path(pp->mpp);
1570 }
1571
1572 /*
1573  * caller must have locked the path list before calling that function
1574  */
1575 static int
1576 reinstate_path (struct path * pp, int add_active)
1577 {
1578         int ret = 0;
1579
1580         if (!pp->mpp)
1581                 return 0;
1582
1583         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1584                 condlog(0, "%s: reinstate failed", pp->dev_t);
1585                 ret = 1;
1586         } else {
1587                 condlog(2, "%s: reinstated", pp->dev_t);
1588                 if (add_active)
1589                         update_queue_mode_add_path(pp->mpp);
1590         }
1591         return ret;
1592 }
1593
1594 static void
1595 enable_group(struct path * pp)
1596 {
1597         struct pathgroup * pgp;
1598
1599         /*
1600          * if the path is added through uev_add_path, pgindex can be unset.
1601          * the next update_multipath_strings() will set it, upon map reload.
1602          *
1603          * we can safely return here, because upon map reload, all
1604          * path groups will be re-enabled.
1605          */
1606         if (!pp->mpp->pg || !pp->pgindex)
1607                 return;
1608
1609         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1610
1611         if (pgp->status == PGSTATE_DISABLED) {
1612                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1613                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1614         }
1615 }
1616
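/*
 * Added note (editor): mpvec_garbage_collector() drops multipath entries
 * whose device-mapper maps have disappeared underneath us, stopping their
 * waiter threads as well.
 */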
1617 static void
1618 mpvec_garbage_collector (struct vectors * vecs)
1619 {
1620         struct multipath * mpp;
1621         unsigned int i;
1622
1623         if (!vecs->mpvec)
1624                 return;
1625
1626         vector_foreach_slot (vecs->mpvec, mpp, i) {
1627                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1628                         condlog(2, "%s: remove dead map", mpp->alias);
1629                         remove_map_and_stop_waiter(mpp, vecs);
1630                         i--;
1631                 }
1632         }
1633 }
1634
1635 /* This is called after a path has started working again. If the multipath
1636  * device for this path uses the followover failback type, and this is the
1637  * best pathgroup, and this is the first path in the pathgroup to come back
1638  * up, then switch to this pathgroup */
1639 static int
1640 followover_should_failback(struct path * pp)
1641 {
1642         struct pathgroup * pgp;
1643         struct path *pp1;
1644         int i;
1645
1646         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1647             !pp->mpp->pg || !pp->pgindex ||
1648             pp->pgindex != pp->mpp->bestpg)
1649                 return 0;
1650
1651         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1652         vector_foreach_slot(pgp->paths, pp1, i) {
1653                 if (pp1 == pp)
1654                         continue;
1655                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1656                         return 0;
1657         }
1658         return 1;
1659 }
1660
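/*
 * Added note (editor): missing_uev_wait_tick() counts down the per-map
 * uevent wait timers. When a map times out waiting for its creation uevent,
 * reloads are re-enabled for it; if a reconfigure was deferred while
 * waiting, it is triggered now.
 */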
1661 static void
1662 missing_uev_wait_tick(struct vectors *vecs)
1663 {
1664         struct multipath * mpp;
1665         unsigned int i;
1666         int timed_out = 0, delayed_reconfig;
1667         struct config *conf;
1668
1669         vector_foreach_slot (vecs->mpvec, mpp, i) {
1670                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1671                         timed_out = 1;
1672                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1673                         if (mpp->wait_for_udev > 1 &&
1674                             update_map(mpp, vecs, 0)) {
1675                                 /* update_map removed map */
1676                                 i--;
1677                                 continue;
1678                         }
1679                         mpp->wait_for_udev = 0;
1680                 }
1681         }
1682
1683         conf = get_multipath_config();
1684         delayed_reconfig = conf->delayed_reconfig;
1685         put_multipath_config(conf);
1686         if (timed_out && delayed_reconfig &&
1687             !need_to_delay_reconfig(vecs)) {
1688                 condlog(2, "reconfigure (delayed)");
1689                 set_config_state(DAEMON_CONFIGURE);
1690         }
1691 }
1692
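/*
 * Added note (editor): ghost_delay_tick() counts down the ghost_delay
 * timers. If a map still has no active path when its timer expires, a
 * forced udev reload of the map is scheduled rather than waiting any
 * longer.
 */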
1693 static void
1694 ghost_delay_tick(struct vectors *vecs)
1695 {
1696         struct multipath * mpp;
1697         unsigned int i;
1698
1699         vector_foreach_slot (vecs->mpvec, mpp, i) {
1700                 if (mpp->ghost_delay_tick <= 0)
1701                         continue;
1702                 if (--mpp->ghost_delay_tick <= 0) {
1703                         condlog(0, "%s: timed out waiting for active path",
1704                                 mpp->alias);
1705                         mpp->force_udev_reload = 1;
1706                         if (update_map(mpp, vecs, 0) != 0) {
1707                                 /* update_map removed map */
1708                                 i--;
1709                                 continue;
1710                         }
1711                 }
1712         }
1713 }
1714
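/*
 * Added note (editor): defered_failback_tick() counts down the deferred
 * failback timers and switches the path group once a timer expires,
 * provided a better group is available.
 */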
1715 static void
1716 defered_failback_tick (vector mpvec)
1717 {
1718         struct multipath * mpp;
1719         unsigned int i;
1720
1721         vector_foreach_slot (mpvec, mpp, i) {
1722                 /*
1723                  * deferred failback timer is counting down
1724                  */
1725                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1726                         mpp->failback_tick--;
1727
1728                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1729                                 switch_pathgroup(mpp);
1730                 }
1731         }
1732 }
1733
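/*
 * Added note (editor): retry_count_tick() counts down the retry
 * (no_path_retry) timers of queueing maps and disables queue_if_no_path
 * once the retry interval has elapsed.
 */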
1734 static void
1735 retry_count_tick(vector mpvec)
1736 {
1737         struct multipath *mpp;
1738         unsigned int i;
1739
1740         vector_foreach_slot (mpvec, mpp, i) {
1741                 if (mpp->retry_tick > 0) {
1742                         mpp->stat_total_queueing_time++;
1743                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1744                         if (--mpp->retry_tick == 0) {
1745                                 mpp->stat_map_failures++;
1746                                 dm_queue_if_no_path(mpp->alias, 0);
1747                                 condlog(2, "%s: Disable queueing", mpp->alias);
1748                         }
1749                 }
1750         }
1751 }
1752
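/*
 * Added note (editor): update_prio() refreshes the priority of this path
 * (or, with refresh_all, of every path in the map) and reports whether any
 * priority changed.
 */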
1753 int update_prio(struct path *pp, int refresh_all)
1754 {
1755         int oldpriority;
1756         struct path *pp1;
1757         struct pathgroup * pgp;
1758         int i, j, changed = 0;
1759         struct config *conf;
1760
1761         if (refresh_all) {
1762                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1763                         vector_foreach_slot (pgp->paths, pp1, j) {
1764                                 oldpriority = pp1->priority;
1765                                 conf = get_multipath_config();
1766                                 pthread_cleanup_push(put_multipath_config,
1767                                                      conf);
1768                                 pathinfo(pp1, conf, DI_PRIO);
1769                                 pthread_cleanup_pop(1);
1770                                 if (pp1->priority != oldpriority)
1771                                         changed = 1;
1772                         }
1773                 }
1774                 return changed;
1775         }
1776         oldpriority = pp->priority;
1777         conf = get_multipath_config();
1778         pthread_cleanup_push(put_multipath_config, conf);
1779         if (pp->state != PATH_DOWN)
1780                 pathinfo(pp, conf, DI_PRIO);
1781         pthread_cleanup_pop(1);
1782
1783         if (pp->priority == oldpriority)
1784                 return 0;
1785         return 1;
1786 }
1787
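/*
 * Added note (editor): update_path_groups() reloads the map with freshly
 * computed path groups and resynchronizes the daemon's view of it with the
 * kernel state.
 */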
1788 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1789 {
1790         if (reload_map(vecs, mpp, refresh, 1))
1791                 return 1;
1792
1793         dm_lib_release();
1794         if (setup_multipath(vecs, mpp) != 0)
1795                 return 1;
1796         sync_map_state(mpp);
1797
1798         return 0;
1799 }
1800
1801 /*
1802  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1803  * and '0' otherwise
1804  */
1805 int
1806 check_path (struct vectors * vecs, struct path * pp, int ticks)
1807 {
1808         int newstate;
1809         int new_path_up = 0;
1810         int chkr_new_path_up = 0;
1811         int add_active;
1812         int disable_reinstate = 0;
1813         int oldchkrstate = pp->chkrstate;
1814         int retrigger_tries, checkint;
1815         struct config *conf;
1816         int ret;
1817
1818         if ((pp->initialized == INIT_OK ||
1819              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1820                 return 0;
1821
1822         if (pp->tick)
1823                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1824         if (pp->tick)
1825                 return 0; /* don't check this path yet */
1826
1827         conf = get_multipath_config();
1828         retrigger_tries = conf->retrigger_tries;
1829         checkint = conf->checkint;
1830         put_multipath_config(conf);
1831         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1832             pp->retriggers < retrigger_tries) {
1833                 condlog(2, "%s: triggering change event to reinitialize",
1834                         pp->dev);
1835                 pp->initialized = INIT_REQUESTED_UDEV;
1836                 pp->retriggers++;
1837                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1838                                      strlen("change"));
1839                 return 0;
1840         }
1841
1842         /*
1843          * schedule the next check as soon as possible,
1844          * in case we exit abnormally from here
1845          */
1846         pp->tick = checkint;
1847
1848         newstate = path_offline(pp);
1849         /*
1850          * Wait for uevent for removed paths;
1851          * some LLDDs like zfcp keep paths unavailable
1852          * without sending uevents.
1853          */
1854         if (newstate == PATH_REMOVED)
1855                 newstate = PATH_DOWN;
1856
1857         if (newstate == PATH_UP) {
1858                 conf = get_multipath_config();
1859                 pthread_cleanup_push(put_multipath_config, conf);
1860                 newstate = get_state(pp, conf, 1, newstate);
1861                 pthread_cleanup_pop(1);
1862         } else
1863                 checker_clear_message(&pp->checker);
1864
1865         if (pp->wwid_changed) {
1866                 condlog(2, "%s: path wwid has changed. Refusing to use",
1867                         pp->dev);
1868                 newstate = PATH_DOWN;
1869         }
1870
1871         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1872                 condlog(2, "%s: unusable path", pp->dev);
1873                 conf = get_multipath_config();
1874                 pthread_cleanup_push(put_multipath_config, conf);
1875                 pathinfo(pp, conf, 0);
1876                 pthread_cleanup_pop(1);
1877                 return 1;
1878         }
1879         if (!pp->mpp) {
1880                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1881                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1882                         condlog(2, "%s: add missing path", pp->dev);
1883                         conf = get_multipath_config();
1884                         pthread_cleanup_push(put_multipath_config, conf);
1885                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1886                         pthread_cleanup_pop(1);
1887                         if (ret == PATHINFO_OK) {
1888                                 ev_add_path(pp, vecs, 1);
1889                                 pp->tick = 1;
1890                         } else if (ret == PATHINFO_SKIPPED)
1891                                 return -1;
1892                 }
1893                 return 0;
1894         }
1895         /*
1896          * Async IO in flight. Keep the previous path state
1897          * and reschedule as soon as possible
1898          */
1899         if (newstate == PATH_PENDING) {
1900                 pp->tick = 1;
1901                 return 0;
1902         }
1903         /*
1904          * Synchronize with kernel state
1905          */
1906         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1907                 condlog(1, "%s: Could not synchronize with kernel state",
1908                         pp->dev);
1909                 pp->dmstate = PSTATE_UNDEF;
1910         }
1911         /* if update_multipath_strings orphaned the path, quit early */
1912         if (!pp->mpp)
1913                 return 0;
1914
1915         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1916                 pp->state = PATH_SHAKY;
1917                 /*
1918                  * reschedule as soon as possible, so that this path can
1919                  * be recovered in time
1920                  */
1921                 pp->tick = 1;
1922                 return 1;
1923         }
1924
1925         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1926              pp->wait_checks > 0) {
1927                 if (pp->mpp->nr_active > 0) {
1928                         pp->state = PATH_DELAYED;
1929                         pp->wait_checks--;
1930                         return 1;
1931                 } else
1932                         pp->wait_checks = 0;
1933         }
1934
1935         /*
1936          * don't reinstate a failed path if it is in stand-by
1937          * and the target supports only implicit TPGS mode.
1938          * this prevents unnecessary I/O by dm on stand-by
1939          * paths if there are no other active paths in the map.
1940          */
1941         disable_reinstate = (newstate == PATH_GHOST &&
1942                             pp->mpp->nr_active == 0 &&
1943                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1944
1945         pp->chkrstate = newstate;
1946         if (newstate != pp->state) {
1947                 int oldstate = pp->state;
1948                 pp->state = newstate;
1949
1950                 LOG_MSG(1, checker_message(&pp->checker));
1951
1952                 /*
1953                  * upon state change, reset the checkint
1954                  * to the shortest delay
1955                  */
1956                 conf = get_multipath_config();
1957                 pp->checkint = conf->checkint;
1958                 put_multipath_config(conf);
1959
1960                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1961                         /*
1962                          * proactively fail path in the DM
1963                          */
1964                         if (oldstate == PATH_UP ||
1965                             oldstate == PATH_GHOST) {
1966                                 fail_path(pp, 1);
1967                                 if (pp->mpp->delay_wait_checks > 0 &&
1968                                     pp->watch_checks > 0) {
1969                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1970                                         pp->watch_checks = 0;
1971                                 }
1972                         } else
1973                                 fail_path(pp, 0);
1974
1975                         /*
1976                          * cancel scheduled failback
1977                          */
1978                         pp->mpp->failback_tick = 0;
1979
1980                         pp->mpp->stat_path_failures++;
1981                         return 1;
1982                 }
1983
1984                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1985                         if (pp->mpp->prflag) {
1986                                 /*
1987                                  * Check Persistent Reservation.
1988                                  */
1989                                 condlog(2, "%s: checking persistent "
1990                                         "reservation registration", pp->dev);
1991                                 mpath_pr_event_handle(pp);
1992                         }
1993                 }
1994
1995                 /*
1996                  * reinstate this path
1997                  */
1998                 if (oldstate != PATH_UP &&
1999                     oldstate != PATH_GHOST) {
2000                         if (pp->mpp->delay_watch_checks > 0)
2001                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2002                         add_active = 1;
2003                 } else {
2004                         if (pp->watch_checks > 0)
2005                                 pp->watch_checks--;
2006                         add_active = 0;
2007                 }
2008                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2009                         condlog(3, "%s: reload map", pp->dev);
2010                         ev_add_path(pp, vecs, 1);
2011                         pp->tick = 1;
2012                         return 0;
2013                 }
2014                 new_path_up = 1;
2015
2016                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2017                         chkr_new_path_up = 1;
2018
2019                 /*
2020                  * if at least one path is up in a group, and
2021                  * the group is disabled, re-enable it
2022                  */
2023                 if (newstate == PATH_UP)
2024                         enable_group(pp);
2025         }
2026         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2027                 if ((pp->dmstate == PSTATE_FAILED ||
2028                     pp->dmstate == PSTATE_UNDEF) &&
2029                     !disable_reinstate) {
2030                         /* Clear IO errors */
2031                         if (reinstate_path(pp, 0)) {
2032                                 condlog(3, "%s: reload map", pp->dev);
2033                                 ev_add_path(pp, vecs, 1);
2034                                 pp->tick = 1;
2035                                 return 0;
2036                         }
2037                 } else {
2038                         unsigned int max_checkint;
2039                         LOG_MSG(4, checker_message(&pp->checker));
2040                         conf = get_multipath_config();
2041                         max_checkint = conf->max_checkint;
2042                         put_multipath_config(conf);
2043                         if (pp->checkint != max_checkint) {
2044                                 /*
2045                                  * double the next check delay.
2046                                  * max at conf->max_checkint
2047                                  */
2048                                 if (pp->checkint < (max_checkint / 2))
2049                                         pp->checkint = 2 * pp->checkint;
2050                                 else
2051                                         pp->checkint = max_checkint;
2052
2053                                 condlog(4, "%s: delay next check %is",
2054                                         pp->dev_t, pp->checkint);
2055                         }
2056                         if (pp->watch_checks > 0)
2057                                 pp->watch_checks--;
2058                         pp->tick = pp->checkint;
2059                 }
2060         }
2061         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2062                 if (pp->dmstate == PSTATE_ACTIVE ||
2063                     pp->dmstate == PSTATE_UNDEF)
2064                         fail_path(pp, 0);
2065                 if (newstate == PATH_DOWN) {
2066                         int log_checker_err;
2067
2068                         conf = get_multipath_config();
2069                         log_checker_err = conf->log_checker_err;
2070                         put_multipath_config(conf);
2071                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2072                                 LOG_MSG(3, checker_message(&pp->checker));
2073                         else
2074                                 LOG_MSG(2, checker_message(&pp->checker));
2075                 }
2076         }
2077
2078         pp->state = newstate;
2079
2080         if (pp->mpp->wait_for_udev)
2081                 return 1;
2082         /*
2083          * path prio refreshing
2084          */
2085         condlog(4, "path prio refresh");
2086
2087         if (update_prio(pp, new_path_up) &&
2088             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2089              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2090                 update_path_groups(pp->mpp, vecs, !new_path_up);
2091         else if (need_switch_pathgroup(pp->mpp, 0)) {
2092                 if (pp->mpp->pgfailback > 0 &&
2093                     (new_path_up || pp->mpp->failback_tick <= 0))
2094                         pp->mpp->failback_tick =
2095                                 pp->mpp->pgfailback + 1;
2096                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2097                          (chkr_new_path_up && followover_should_failback(pp)))
2098                         switch_pathgroup(pp->mpp);
2099         }
2100         return 1;
2101 }
2102
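/*
 * Added note (editor): init_path_check_interval() resets every path's check
 * interval to the configured base checkint before the checker loop starts.
 */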
2103 static void init_path_check_interval(struct vectors *vecs)
2104 {
2105         struct config *conf;
2106         struct path *pp;
2107         unsigned int i;
2108
2109         vector_foreach_slot (vecs->pathvec, pp, i) {
2110                 conf = get_multipath_config();
2111                 pp->checkint = conf->checkint;
2112                 put_multipath_config(conf);
2113         }
2114 }
2115
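/*
 * Added note (editor): checkerloop() is the main path checker thread. Once
 * per tick it runs check_path() on every known path, handles the deferred
 * failback, retry, uevent-wait and ghost-delay timers, garbage-collects
 * dead maps, and then sleeps until the next polling interval (honoring
 * strict_timing if configured).
 */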
2116 static void *
2117 checkerloop (void *ap)
2118 {
2119         struct vectors *vecs;
2120         struct path *pp;
2121         int count = 0;
2122         unsigned int i;
2123         struct timespec last_time;
2124         struct config *conf;
2125
2126         pthread_cleanup_push(rcu_unregister, NULL);
2127         rcu_register_thread();
2128         mlockall(MCL_CURRENT | MCL_FUTURE);
2129         vecs = (struct vectors *)ap;
2130         condlog(2, "path checkers start up");
2131
2132         /* Tweak start time for initial path check */
2133         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2134                 last_time.tv_sec = 0;
2135         else
2136                 last_time.tv_sec -= 1;
2137
2138         while (1) {
2139                 struct timespec diff_time, start_time, end_time;
2140                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2141
2142                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2143                         start_time.tv_sec = 0;
2144                 if (start_time.tv_sec && last_time.tv_sec) {
2145                         timespecsub(&start_time, &last_time, &diff_time);
2146                         condlog(4, "tick (%lu.%06lu secs)",
2147                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2148                         last_time = start_time;
2149                         ticks = diff_time.tv_sec;
2150                 } else {
2151                         ticks = 1;
2152                         condlog(4, "tick (%d ticks)", ticks);
2153                 }
2154 #ifdef USE_SYSTEMD
2155                 if (use_watchdog)
2156                         sd_notify(0, "WATCHDOG=1");
2157 #endif
2158                 rc = set_config_state(DAEMON_RUNNING);
2159                 if (rc == ETIMEDOUT) {
2160                         condlog(4, "timeout waiting for DAEMON_IDLE");
2161                         continue;
2162                 }
2163
2164                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2165                 lock(&vecs->lock);
2166                 pthread_testcancel();
2167                 vector_foreach_slot (vecs->pathvec, pp, i) {
2168                         rc = check_path(vecs, pp, ticks);
2169                         if (rc < 0) {
2170                                 vector_del_slot(vecs->pathvec, i);
2171                                 free_path(pp);
2172                                 i--;
2173                         } else
2174                                 num_paths += rc;
2175                 }
2176                 lock_cleanup_pop(vecs->lock);
2177
2178                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2179                 lock(&vecs->lock);
2180                 pthread_testcancel();
2181                 defered_failback_tick(vecs->mpvec);
2182                 retry_count_tick(vecs->mpvec);
2183                 missing_uev_wait_tick(vecs);
2184                 ghost_delay_tick(vecs);
2185                 lock_cleanup_pop(vecs->lock);
2186
2187                 if (count)
2188                         count--;
2189                 else {
2190                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2191                         lock(&vecs->lock);
2192                         pthread_testcancel();
2193                         condlog(4, "map garbage collection");
2194                         mpvec_garbage_collector(vecs);
2195                         count = MAPGCINT;
2196                         lock_cleanup_pop(vecs->lock);
2197                 }
2198
2199                 diff_time.tv_nsec = 0;
2200                 if (start_time.tv_sec &&
2201                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2202                         timespecsub(&end_time, &start_time, &diff_time);
2203                         if (num_paths) {
2204                                 unsigned int max_checkint;
2205
2206                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2207                                         num_paths, num_paths > 1 ? "s" : "",
2208                                         diff_time.tv_sec,
2209                                         diff_time.tv_nsec / 1000);
2210                                 conf = get_multipath_config();
2211                                 max_checkint = conf->max_checkint;
2212                                 put_multipath_config(conf);
2213                                 if (diff_time.tv_sec > max_checkint)
2214                                         condlog(1, "path checkers took longer "
2215                                                 "than %lu seconds, consider "
2216                                                 "increasing max_polling_interval",
2217                                                 diff_time.tv_sec);
2218                         }
2219                 }
2220                 check_foreign();
2221                 post_config_state(DAEMON_IDLE);
2222                 conf = get_multipath_config();
2223                 strict_timing = conf->strict_timing;
2224                 put_multipath_config(conf);
2225                 if (!strict_timing)
2226                         sleep(1);
2227                 else {
2228                         if (diff_time.tv_nsec) {
2229                                 diff_time.tv_sec = 0;
2230                                 diff_time.tv_nsec =
2231                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2232                         } else
2233                                 diff_time.tv_sec = 1;
2234
2235                         condlog(3, "waiting for %lu.%06lu secs",
2236                                 diff_time.tv_sec,
2237                                 diff_time.tv_nsec / 1000);
2238                         if (nanosleep(&diff_time, NULL) != 0) {
2239                                 condlog(3, "nanosleep failed with error %d",
2240                                         errno);
2241                                 conf = get_multipath_config();
2242                                 conf->strict_timing = 0;
2243                                 put_multipath_config(conf);
2244                                 break;
2245                         }
2246                 }
2247         }
2248         pthread_cleanup_pop(1);
2249         return NULL;
2250 }
2251
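/*
 * Added note (editor): configure() (re)builds the daemon's path and map
 * vectors from scratch: it discovers paths and maps, coalesces them into
 * multipath devices, pushes the resulting tables into device-mapper, and
 * starts event waiters for the new maps.
 */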
2252 int
2253 configure (struct vectors * vecs)
2254 {
2255         struct multipath * mpp;
2256         struct path * pp;
2257         vector mpvec;
2258         int i, ret;
2259         struct config *conf;
2260         static int force_reload = FORCE_RELOAD_WEAK;
2261
2262         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2263                 condlog(0, "couldn't allocate path vec in configure");
2264                 return 1;
2265         }
2266
2267         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2268                 condlog(0, "couldn't allocate multipath vec in configure");
2269                 return 1;
2270         }
2271
2272         if (!(mpvec = vector_alloc())) {
2273                 condlog(0, "couldn't allocate new maps vec in configure");
2274                 return 1;
2275         }
2276
2277         /*
2278          * probe for current path (from sysfs) and map (from dm) sets
2279          */
2280         ret = path_discovery(vecs->pathvec, DI_ALL);
2281         if (ret < 0) {
2282                 condlog(0, "configure failed at path discovery");
2283                 goto fail;
2284         }
2285
2286         vector_foreach_slot (vecs->pathvec, pp, i){
2287                 conf = get_multipath_config();
2288                 pthread_cleanup_push(put_multipath_config, conf);
2289                 if (filter_path(conf, pp) > 0){
2290                         vector_del_slot(vecs->pathvec, i);
2291                         free_path(pp);
2292                         i--;
2293                 }
2294                 else
2295                         pp->checkint = conf->checkint;
2296                 pthread_cleanup_pop(1);
2297         }
2298         if (map_discovery(vecs)) {
2299                 condlog(0, "configure failed at map discovery");
2300                 goto fail;
2301         }
2302
2303         /*
2304          * create new set of maps & push changed ones into dm
2305          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2306          * superfluous ACT_RELOAD ioctls. Later calls are done
2307          * with FORCE_RELOAD_YES.
2308          */
2309         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2310         if (force_reload == FORCE_RELOAD_WEAK)
2311                 force_reload = FORCE_RELOAD_YES;
2312         if (ret) {
2313                 condlog(0, "configure failed while coalescing paths");
2314                 goto fail;
2315         }
2316
2317         /*
2318          * may need to remove some maps which are no longer relevant
2319          * e.g., due to blacklist changes in conf file
2320          */
2321         if (coalesce_maps(vecs, mpvec)) {
2322                 condlog(0, "configure failed while coalescing maps");
2323                 goto fail;
2324         }
2325
2326         dm_lib_release();
2327
2328         sync_maps_state(mpvec);
2329         vector_foreach_slot(mpvec, mpp, i){
2330                 if (remember_wwid(mpp->wwid) == 1)
2331                         trigger_paths_udev_change(mpp, true);
2332                 update_map_pr(mpp);
2333         }
2334
2335         /*
2336          * purge dm of old maps
2337          */
2338         remove_maps(vecs);
2339
2340         /*
2341          * save new set of maps formed by considering current path state
2342          */
2343         vector_free(vecs->mpvec);
2344         vecs->mpvec = mpvec;
2345
2346         /*
2347          * start dm event waiter threads for these new maps
2348          */
2349         vector_foreach_slot(vecs->mpvec, mpp, i) {
2350                 if (wait_for_events(mpp, vecs)) {
2351                         remove_map(mpp, vecs, 1);
2352                         i--;
2353                         continue;
2354                 }
2355                 if (setup_multipath(vecs, mpp))
2356                         i--;
2357         }
2358         return 0;
2359
2360 fail:
2361         vector_free(mpvec);
2362         return 1;
2363 }
2364
2365 int
2366 need_to_delay_reconfig(struct vectors * vecs)
2367 {
2368         struct multipath *mpp;
2369         int i;
2370
2371         if (!VECTOR_SIZE(vecs->mpvec))
2372                 return 0;
2373
2374         vector_foreach_slot(vecs->mpvec, mpp, i) {
2375                 if (mpp->wait_for_udev)
2376                         return 1;
2377         }
2378         return 0;
2379 }
2380
2381 void rcu_free_config(struct rcu_head *head)
2382 {
2383         struct config *conf = container_of(head, struct config, rcu);
2384
2385         free_config(conf);
2386 }
2387
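/*
 * Added note (editor): reconfigure() reloads DEFAULT_CONFIGFILE, drops all
 * existing maps and paths, publishes the new configuration via RCU, and
 * runs configure() again under the new settings.
 */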
2388 int
2389 reconfigure (struct vectors * vecs)
2390 {
2391         struct config * old, *conf;
2392
2393         conf = load_config(DEFAULT_CONFIGFILE);
2394         if (!conf)
2395                 return 1;
2396
2397         /*
2398          * free old map and path vectors ... they use old conf state
2399          */
2400         if (VECTOR_SIZE(vecs->mpvec))
2401                 remove_maps_and_stop_waiters(vecs);
2402
2403         free_pathvec(vecs->pathvec, FREE_PATHS);
2404         vecs->pathvec = NULL;
2405         delete_all_foreign();
2406
2407         /* Re-read any timezone changes */
2408         tzset();
2409
2410         dm_tgt_version(conf->version, TGT_MPATH);
2411         if (verbosity)
2412                 conf->verbosity = verbosity;
2413         if (bindings_read_only)
2414                 conf->bindings_read_only = bindings_read_only;
2415         uxsock_timeout = conf->uxsock_timeout;
2416
2417         old = rcu_dereference(multipath_conf);
2418         rcu_assign_pointer(multipath_conf, conf);
2419         call_rcu(&old->rcu, rcu_free_config);
2420
2421         configure(vecs);
2422
2423
2424         return 0;
2425 }
2426
2427 static struct vectors *
2428 init_vecs (void)
2429 {
2430         struct vectors * vecs;
2431
2432         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2433
2434         if (!vecs)
2435                 return NULL;
2436
2437         pthread_mutex_init(&vecs->lock.mutex, NULL);
2438
2439         return vecs;
2440 }
2441
2442 static void *
2443 signal_set(int signo, void (*func) (int))
2444 {
2445         int r;
2446         struct sigaction sig;
2447         struct sigaction osig;
2448
2449         sig.sa_handler = func;
2450         sigemptyset(&sig.sa_mask);
2451         sig.sa_flags = 0;
2452
2453         r = sigaction(signo, &sig, &osig);
2454
2455         if (r < 0)
2456                 return (SIG_ERR);
2457         else
2458                 return (osig.sa_handler);
2459 }
2460
2461 void
2462 handle_signals(bool nonfatal)
2463 {
2464         if (exit_sig) {
2465                 condlog(2, "exit (signal)");
2466                 exit_sig = 0;
2467                 exit_daemon();
2468         }
2469         if (!nonfatal)
2470                 return;
2471         if (reconfig_sig) {
2472                 condlog(2, "reconfigure (signal)");
2473                 set_config_state(DAEMON_CONFIGURE);
2474         }
2475         if (log_reset_sig) {
2476                 condlog(2, "reset log (signal)");
2477                 if (logsink == 1)
2478                         log_thread_reset();
2479         }
2480         reconfig_sig = 0;
2481         log_reset_sig = 0;
2482 }
2483
2484 static void
2485 sighup (int sig)
2486 {
2487         reconfig_sig = 1;
2488 }
2489
2490 static void
2491 sigend (int sig)
2492 {
2493         exit_sig = 1;
2494 }
2495
2496 static void
2497 sigusr1 (int sig)
2498 {
2499         log_reset_sig = 1;
2500 }
2501
2502 static void
2503 sigusr2 (int sig)
2504 {
2505         condlog(3, "SIGUSR2 received");
2506 }
2507
2508 static void
2509 signal_init(void)
2510 {
2511         sigset_t set;
2512
2513         /* block all signals */
2514         sigfillset(&set);
2515         /* SIGPIPE occurs if logging fails */
2516         sigdelset(&set, SIGPIPE);
2517         pthread_sigmask(SIG_SETMASK, &set, NULL);
2518
2519         /* Other signals will be unblocked in the uxlsnr thread */
2520         signal_set(SIGHUP, sighup);
2521         signal_set(SIGUSR1, sigusr1);
2522         signal_set(SIGUSR2, sigusr2);
2523         signal_set(SIGINT, sigend);
2524         signal_set(SIGTERM, sigend);
2525         signal_set(SIGPIPE, sigend);
2526 }
2527
2528 static void
2529 setscheduler (void)
2530 {
2531         int res;
2532         static struct sched_param sched_param = {
2533                 .sched_priority = 99
2534         };
2535
2536         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2537
2538         if (res == -1)
2539                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2540         return;
2541 }
2542
2543 static void
2544 set_oom_adj (void)
2545 {
2546 #ifdef OOM_SCORE_ADJ_MIN
2547         int retry = 1;
2548         char *file = "/proc/self/oom_score_adj";
2549         int score = OOM_SCORE_ADJ_MIN;
2550 #else
2551         int retry = 0;
2552         char *file = "/proc/self/oom_adj";
2553         int score = OOM_ADJUST_MIN;
2554 #endif
2555         FILE *fp;
2556         struct stat st;
2557         char *envp;
2558
2559         envp = getenv("OOMScoreAdjust");
2560         if (envp) {
2561                 condlog(3, "Using systemd provided OOMScoreAdjust");
2562                 return;
2563         }
2564         do {
2565                 if (stat(file, &st) == 0){
2566                         fp = fopen(file, "w");
2567                         if (!fp) {
2568                                 condlog(0, "couldn't fopen %s : %s", file,
2569                                         strerror(errno));
2570                                 return;
2571                         }
2572                         fprintf(fp, "%i", score);
2573                         fclose(fp);
2574                         return;
2575                 }
2576                 if (errno != ENOENT) {
2577                         condlog(0, "couldn't stat %s : %s", file,
2578                                 strerror(errno));
2579                         return;
2580                 }
2581 #ifdef OOM_ADJUST_MIN
2582                 file = "/proc/self/oom_adj";
2583                 score = OOM_ADJUST_MIN;
2584 #else
2585                 retry = 0;
2586 #endif
2587         } while (retry--);
2588         condlog(0, "couldn't adjust oom score");
2589 }
2590
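/*
 * Added note (editor): child() is the body of the daemon process. It sets
 * up logging, the pidfile, configuration, checkers, prioritizers and the
 * vector structures, spawns the worker threads (uevent listener and
 * dispatcher, CLI listener, path checker and, optionally, the dm-events
 * waiter), then services reconfigure requests until DAEMON_SHUTDOWN is
 * reached and everything is torn down again.
 */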
2591 static int
2592 child (void * param)
2593 {
2594         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2595         pthread_attr_t log_attr, misc_attr, uevent_attr;
2596         struct vectors * vecs;
2597         struct multipath * mpp;
2598         int i;
2599 #ifdef USE_SYSTEMD
2600         unsigned long checkint;
2601         int startup_done = 0;
2602 #endif
2603         int rc;
2604         int pid_fd = -1;
2605         struct config *conf;
2606         char *envp;
2607         int queue_without_daemon;
2608
2609         mlockall(MCL_CURRENT | MCL_FUTURE);
2610         signal_init();
2611         rcu_init();
2612
2613         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2614         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2615         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2616         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2617
2618         if (logsink == 1) {
2619                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2620                 log_thread_start(&log_attr);
2621                 pthread_attr_destroy(&log_attr);
2622         }
2623         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2624         if (pid_fd < 0) {
2625                 condlog(1, "failed to create pidfile");
2626                 if (logsink == 1)
2627                         log_thread_stop();
2628                 exit(1);
2629         }
2630
2631         post_config_state(DAEMON_START);
2632
2633         condlog(2, "--------start up--------");
2634         condlog(2, "read " DEFAULT_CONFIGFILE);
2635
2636         conf = load_config(DEFAULT_CONFIGFILE);
2637         if (!conf)
2638                 goto failed;
2639
2640         if (verbosity)
2641                 conf->verbosity = verbosity;
2642         if (bindings_read_only)
2643                 conf->bindings_read_only = bindings_read_only;
2644         uxsock_timeout = conf->uxsock_timeout;
2645         rcu_assign_pointer(multipath_conf, conf);
2646         if (init_checkers(conf->multipath_dir)) {
2647                 condlog(0, "failed to initialize checkers");
2648                 goto failed;
2649         }
2650         if (init_prio(conf->multipath_dir)) {
2651                 condlog(0, "failed to initialize prioritizers");
2652                 goto failed;
2653         }
2654         /* Failing this is non-fatal */
2655
2656         init_foreign(conf->multipath_dir);
2657
2658         if (poll_dmevents)
2659                 poll_dmevents = dmevent_poll_supported();
2660         setlogmask(LOG_UPTO(conf->verbosity + 3));
2661
2662         envp = getenv("LimitNOFILE");
2663
2664         if (envp)
2665                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2666         else
2667                 set_max_fds(conf->max_fds);
2668
2669         vecs = gvecs = init_vecs();
2670         if (!vecs)
2671                 goto failed;
2672
2673         setscheduler();
2674         set_oom_adj();
2675
2676 #ifdef USE_SYSTEMD
2677         envp = getenv("WATCHDOG_USEC");
2678         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2679                 /* Value is in microseconds */
2680                 conf->max_checkint = checkint / 1000000;
2681                 /* Rescale checkint */
2682                 if (conf->checkint > conf->max_checkint)
2683                         conf->checkint = conf->max_checkint;
2684                 else
2685                         conf->checkint = conf->max_checkint / 4;
2686                 condlog(3, "enabling watchdog, interval %d max %d",
2687                         conf->checkint, conf->max_checkint);
2688                 use_watchdog = conf->checkint;
2689         }
2690 #endif
2691         /*
2692          * Startup done; drop the local config reference (it is published via RCU)
2693          */
2694         conf = NULL;
2695
2696         /*
2697          * Signal start of configuration
2698          */
2699         post_config_state(DAEMON_CONFIGURE);
2700
2701         init_path_check_interval(vecs);
2702
2703         if (poll_dmevents) {
2704                 if (init_dmevent_waiter(vecs)) {
2705                         condlog(0, "failed to allocate dmevents waiter info");
2706                         goto failed;
2707                 }
2708                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2709                                          wait_dmevents, NULL))) {
2710                         condlog(0, "failed to create dmevent waiter thread: %d",
2711                                 rc);
2712                         goto failed;
2713                 }
2714         }
2715
2716         /*
2717          * Start uevent listener early to catch events
2718          */
2719         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2720                 condlog(0, "failed to create uevent thread: %d", rc);
2721                 goto failed;
2722         }
2723         pthread_attr_destroy(&uevent_attr);
2724         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2725                 condlog(0, "failed to create cli listener: %d", rc);
2726                 goto failed;
2727         }
2728
2729         /*
2730          * start threads
2731          */
2732         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2733                 condlog(0,"failed to create checker loop thread: %d", rc);
2734                 goto failed;
2735         }
2736         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2737                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2738                 goto failed;
2739         }
2740         pthread_attr_destroy(&misc_attr);
2741
2742         while (running_state != DAEMON_SHUTDOWN) {
2743                 pthread_cleanup_push(config_cleanup, NULL);
2744                 pthread_mutex_lock(&config_lock);
2745                 if (running_state != DAEMON_CONFIGURE &&
2746                     running_state != DAEMON_SHUTDOWN) {
2747                         pthread_cond_wait(&config_cond, &config_lock);
2748                 }
2749                 pthread_cleanup_pop(1);
2750                 if (running_state == DAEMON_CONFIGURE) {
2751                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2752                         lock(&vecs->lock);
2753                         pthread_testcancel();
2754                         if (!need_to_delay_reconfig(vecs)) {
2755                                 reconfigure(vecs);
2756                         } else {
2757                                 conf = get_multipath_config();
2758                                 conf->delayed_reconfig = 1;
2759                                 put_multipath_config(conf);
2760                         }
2761                         lock_cleanup_pop(vecs->lock);
2762                         post_config_state(DAEMON_IDLE);
2763 #ifdef USE_SYSTEMD
2764                         if (!startup_done) {
2765                                 sd_notify(0, "READY=1");
2766                                 startup_done = 1;
2767                         }
2768 #endif
2769                 }
2770         }
2771
2772         lock(&vecs->lock);
2773         conf = get_multipath_config();
2774         queue_without_daemon = conf->queue_without_daemon;
2775         put_multipath_config(conf);
2776         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2777                 vector_foreach_slot(vecs->mpvec, mpp, i)
2778                         dm_queue_if_no_path(mpp->alias, 0);
2779         remove_maps_and_stop_waiters(vecs);
2780         unlock(&vecs->lock);
2781
2782         pthread_cancel(check_thr);
2783         pthread_cancel(uevent_thr);
2784         pthread_cancel(uxlsnr_thr);
2785         pthread_cancel(uevq_thr);
2786         if (poll_dmevents)
2787                 pthread_cancel(dmevent_thr);
2788
2789         pthread_join(check_thr, NULL);
2790         pthread_join(uevent_thr, NULL);
2791         pthread_join(uxlsnr_thr, NULL);
2792         pthread_join(uevq_thr, NULL);
2793         if (poll_dmevents)
2794                 pthread_join(dmevent_thr, NULL);
2795
2796         stop_io_err_stat_thread();
2797
2798         lock(&vecs->lock);
2799         free_pathvec(vecs->pathvec, FREE_PATHS);
2800         vecs->pathvec = NULL;
2801         unlock(&vecs->lock);
2802
2803         pthread_mutex_destroy(&vecs->lock.mutex);
2804         FREE(vecs);
2805         vecs = NULL;
2806
2807         cleanup_foreign();
2808         cleanup_checkers();
2809         cleanup_prio();
2810         if (poll_dmevents)
2811                 cleanup_dmevent_waiter();
2812
2813         dm_lib_release();
2814         dm_lib_exit();
2815
2816         /* We're done here */
2817         condlog(3, "unlink pidfile");
2818         unlink(DEFAULT_PIDFILE);
2819
2820         condlog(2, "--------shut down-------");
2821
2822         if (logsink == 1)
2823                 log_thread_stop();
2824
2825         /*
2826          * Freeing config must be done after condlog() and dm_lib_exit(),
2827          * because logging functions like dlog() and dm_write_log()
2828          * reference the config.
2829          */
2830         conf = rcu_dereference(multipath_conf);
2831         rcu_assign_pointer(multipath_conf, NULL);
2832         call_rcu(&conf->rcu, rcu_free_config);
2833         udev_unref(udev);
2834         udev = NULL;
2835         pthread_attr_destroy(&waiter_attr);
2836         pthread_attr_destroy(&io_err_stat_attr);
2837 #ifdef _DEBUG_
2838         dbg_free_final(NULL);
2839 #endif
2840
2841 #ifdef USE_SYSTEMD
2842         sd_notify(0, "ERRNO=0");
2843 #endif
2844         exit(0);
2845
2846 failed:
2847 #ifdef USE_SYSTEMD
2848         sd_notify(0, "ERRNO=1");
2849 #endif
2850         if (pid_fd >= 0)
2851                 close(pid_fd);
2852         exit(1);
2853 }
2854
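/*
 * Added note (editor): daemonize() performs the classic double fork,
 * detaches from the controlling terminal, chdirs to "/" and redirects the
 * standard file descriptors to /dev/null. It returns 0 in the daemon
 * child, the child's PID in the original parent, and -1 on error.
 */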
2855 static int
2856 daemonize(void)
2857 {
2858         int pid;
2859         int dev_null_fd;
2860
2861         if( (pid = fork()) < 0){
2862                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2863                 return -1;
2864         }
2865         else if (pid != 0)
2866                 return pid;
2867
2868         setsid();
2869
2870         if ( (pid = fork()) < 0)
2871                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2872         else if (pid != 0)
2873                 _exit(0);
2874
2875         if (chdir("/") < 0)
2876                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2877
2878         dev_null_fd = open("/dev/null", O_RDWR);
2879         if (dev_null_fd < 0){
2880                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2881                         strerror(errno));
2882                 _exit(0);
2883         }
2884
2885         close(STDIN_FILENO);
2886         if (dup(dev_null_fd) < 0) {
2887                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2888                         strerror(errno));
2889                 _exit(0);
2890         }
2891         close(STDOUT_FILENO);
2892         if (dup(dev_null_fd) < 0) {
2893                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2894                         strerror(errno));
2895                 _exit(0);
2896         }
2897         close(STDERR_FILENO);
2898         if (dup(dev_null_fd) < 0) {
2899                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2900                         strerror(errno));
2901                 _exit(0);
2902         }
2903         close(dev_null_fd);
2904         daemon_pid = getpid();
2905         return 0;
2906 }
2907
2908 int
2909 main (int argc, char *argv[])
2910 {
2911         extern char *optarg;
2912         extern int optind;
2913         int arg;
2914         int err;
2915         int foreground = 0;
2916         struct config *conf;
2917
2918         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2919                                    "Manipulated through RCU");
2920         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2921                 "Suppress complaints about unprotected running_state reads");
2922         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2923                 "Suppress complaints about this scalar variable");
2924
2925         logsink = 1;
2926
2927         if (getuid() != 0) {
2928                 fprintf(stderr, "need to be root\n");
2929                 exit(1);
2930         }
2931
2932         /* make sure we don't lock any path */
2933         if (chdir("/") < 0)
2934                 fprintf(stderr, "can't chdir to root directory : %s\n",
2935                         strerror(errno));
2936         umask(umask(077) | 022);
2937
2938         pthread_cond_init_mono(&config_cond);
2939
2940         udev = udev_new();
2941         libmp_udev_set_sync_support(0);
2942
2943         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2944                 switch(arg) {
2945                 case 'd':
2946                         foreground = 1;
2947                         if (logsink > 0)
2948                                 logsink = 0;
2949                         //debug=1; /* ### comment me out ### */
2950                         break;
2951                 case 'v':
2952                         /* verbosity argument must start with a digit */
2953                         if (!isdigit(optarg[0]))
2954                                 exit(1);
2955
2956                         verbosity = atoi(optarg);
2957                         break;
2958                 case 's':
2959                         logsink = -1;
2960                         break;
2961                 case 'k':
2962                         logsink = 0;
2963                         conf = load_config(DEFAULT_CONFIGFILE);
2964                         if (!conf)
2965                                 exit(1);
2966                         if (verbosity)
2967                                 conf->verbosity = verbosity;
2968                         uxsock_timeout = conf->uxsock_timeout;
2969                         uxclnt(optarg, uxsock_timeout + 100);
2970                         free_config(conf);
2971                         exit(0);
2972                 case 'B':
2973                         bindings_read_only = 1;
2974                         break;
2975                 case 'n':
2976                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
2977                         break;
2978                 case 'w':
2979                         poll_dmevents = 0;
2980                         break;
2981                 default:
2982                         fprintf(stderr, "Invalid argument '-%c'\n",
2983                                 optopt);
2984                         exit(1);
2985                 }
2986         }
2987         if (optind < argc) {
2988                 char cmd[CMDSIZE];
2989                 char * s = cmd;
2990                 char * c = s;
2991
2992                 logsink = 0;
2993                 conf = load_config(DEFAULT_CONFIGFILE);
2994                 if (!conf)
2995                         exit(1);
2996                 if (verbosity)
2997                         conf->verbosity = verbosity;
2998                 uxsock_timeout = conf->uxsock_timeout;
2999                 memset(cmd, 0x0, CMDSIZE);
3000                 while (optind < argc) {
3001                         if (strchr(argv[optind], ' '))
3002                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3003                         else
3004                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3005                         optind++;
3006                 }
3007                 c += snprintf(c, s + CMDSIZE - c, "\n");
3008                 uxclnt(s, uxsock_timeout + 100);
3009                 free_config(conf);
3010                 exit(0);
3011         }
3012
3013         if (foreground) {
3014                 if (!isatty(fileno(stdout)))
3015                         setbuf(stdout, NULL);
3016                 err = 0;
3017                 daemon_pid = getpid();
3018         } else
3019                 err = daemonize();
3020
3021         if (err < 0)
3022                 /* error */
3023                 exit(1);
3024         else if (err > 0)
3025                 /* parent dies */
3026                 exit(0);
3027         else
3028                 /* child lives */
3029                 return (child(NULL));
3030 }
3031
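/*
 * Added note (editor): mpath_pr_event_handler_fn() is the worker thread
 * for persistent-reservation handling on a reinstated path. It reads the
 * registered keys (PR IN READ KEYS), checks whether the map's
 * reservation_key is among them, and if so re-registers it on this path
 * with REGISTER AND IGNORE EXISTING KEY.
 */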
3032 void *  mpath_pr_event_handler_fn (void * pathp )
3033 {
3034         struct multipath * mpp;
3035         int i, ret, isFound;
3036         struct path * pp = (struct path *)pathp;
3037         struct prout_param_descriptor *param;
3038         struct prin_resp *resp;
3039
3040         rcu_register_thread();
3041         mpp = pp->mpp;
3042
3043         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3044         if (!resp){
3045                 condlog(0,"%s Alloc failed for prin response", pp->dev);
3046                 goto out;
3047         }
3048
3049         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3050         if (ret != MPATH_PR_SUCCESS )
3051         {
3052                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
3053                 goto out;
3054         }
3055
3056         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
3057                         resp->prin_descriptor.prin_readkeys.additional_length );
3058
3059         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
3060         {
3061                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3062                 ret = MPATH_PR_SUCCESS;
3063                 goto out;
3064         }
3065         condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ",
3066                 get_be64(mpp->reservation_key));
3067
3068         isFound = 0;
3069         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
3070         {
3071                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
3072                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
3073                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3074                 {
3075                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3076                         isFound = 1;
3077                         break;
3078                 }
3079         }
3080         if (!isFound)
3081         {
3082                 condlog(0, "%s: Either device not registered or ", pp->dev);
3083                 condlog(0, "host is not authorised for registration. Skip path");
3084                 ret = MPATH_PR_OTHER;
3085                 goto out;
3086         }
3087
3088         param = malloc(sizeof(struct prout_param_descriptor));
3089         memset(param, 0, sizeof(struct prout_param_descriptor));
3090         param->sa_flags = mpp->sa_flags;
3091         memcpy(param->sa_key, &mpp->reservation_key, 8);
3092         param->num_transportid = 0;
3093
3094         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3095
3096         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3097         if (ret != MPATH_PR_SUCCESS )
3098         {
3099                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
3100         }
3101         mpp->prflag = 1;
3102
3103         free(param);
3104 out:
3105         if (resp)
3106                 free(resp);
3107         rcu_unregister_thread();
3108         return NULL;
3109 }
3110
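/*
 * Added note (editor): mpath_pr_event_handle() kicks off PR registration
 * handling for a reinstated SCSI path when the map has a reservation_key
 * configured. The handler runs in a separate thread, but the caller joins
 * it immediately, so the work is effectively synchronous.
 */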
3111 int mpath_pr_event_handle(struct path *pp)
3112 {
3113         pthread_t thread;
3114         int rc;
3115         pthread_attr_t attr;
3116         struct multipath * mpp;
3117
3118         if (pp->bus != SYSFS_BUS_SCSI)
3119                 return 0;
3120
3121         mpp = pp->mpp;
3122
3123         if (!get_be64(mpp->reservation_key))
3124                 return -1;
3125
3126         pthread_attr_init(&attr);
3127         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3128
3129         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
3130         if (rc) {
3131                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3132                 return -1;
3133         }
3134         pthread_attr_destroy(&attr);
3135         rc = pthread_join(thread, NULL);
3136         return 0;
3137 }