e15ece03ae24baee1f1290ffba0ea4ea45204238
[multipath-tools/.git] / multipathd / main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69 #include "uxsock.h"
70
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
73
74 #include "prioritizers/alua_rtpg.h"
75
76 #include "main.h"
77 #include "pidfile.h"
78 #include "uxlsnr.h"
79 #include "uxclnt.h"
80 #include "cli.h"
81 #include "cli_handlers.h"
82 #include "lock.h"
83 #include "waiter.h"
84 #include "dmevents.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
/*
 * Log the checker result for path @pp at log level @lvl, provided @lvl
 * does not exceed the current verbosity @verb and a checker has been
 * selected for the path.  Offline paths get a fixed "path offline"
 * message; otherwise the checker's own message is logged, and nothing
 * is logged when the checker message is empty.
 */
#define LOG_MSG(lvl, verb, pp)                                  \
do {                                                            \
	if (pp->mpp && checker_selected(&pp->checker) &&        \
	    lvl <= verb) {                                      \
		if (pp->offline)                                \
			condlog(lvl, "%s: %s - path offline",   \
				pp->mpp->alias, pp->dev);       \
		else  {                                         \
			const char *__m =                       \
				checker_message(&pp->checker);  \
								\
			if (strlen(__m))                              \
				condlog(lvl, "%s: %s - %s checker%s", \
					pp->mpp->alias,               \
					pp->dev,                      \
					checker_name(&pp->checker),   \
					__m);                         \
		}                                                     \
	}                                                             \
} while(0)
113
/* Parameter bundle for map-event handling (device name + its map). */
struct mpath_event_param
{
	char * devname;		/* kernel device name of the map */
	struct multipath *mpp;	/* the multipath map the event refers to */
};
119
/* Global daemon flags; set at startup from command line / config. */
int logsink;
int uxsock_timeout;
int verbosity;
int bindings_read_only;
int ignore_new_devs;
#ifdef NO_DMEVENTS_POLL
int poll_dmevents = 0;
#else
/* default: use the dm-events polling code (see dmevents.h) */
int poll_dmevents = 1;
#endif
/* Don't access this variable without holding config_lock */
enum daemon_status running_state = DAEMON_INIT;
pid_t daemon_pid;
pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
/* broadcast whenever running_state changes; always paired with config_lock */
pthread_cond_t config_cond;
135
136 static inline enum daemon_status get_running_state(void)
137 {
138         enum daemon_status st;
139
140         pthread_mutex_lock(&config_lock);
141         st = running_state;
142         pthread_mutex_unlock(&config_lock);
143         return st;
144 }
145
/*
 * global copy of vecs for use in sig handlers
 */
struct vectors * gvecs;

/* process-wide libudev context */
struct udev * udev;

/* current configuration; read via RCU (see get_multipath_config()) */
struct config *multipath_conf;

/* Local variables */
/* flags written from signal handlers; presumably polled by the main loop
 * (handlers not visible in this chunk) */
static volatile sig_atomic_t exit_sig;
static volatile sig_atomic_t reconfig_sig;
static volatile sig_atomic_t log_reset_sig;
159
160 const char *
161 daemon_status(void)
162 {
163         switch (get_running_state()) {
164         case DAEMON_INIT:
165                 return "init";
166         case DAEMON_START:
167                 return "startup";
168         case DAEMON_CONFIGURE:
169                 return "configure";
170         case DAEMON_IDLE:
171                 return "idle";
172         case DAEMON_RUNNING:
173                 return "running";
174         case DAEMON_SHUTDOWN:
175                 return "shutdown";
176         }
177         return NULL;
178 }
179
180 /*
181  * I love you too, systemd ...
182  */
183 static const char *
184 sd_notify_status(enum daemon_status state)
185 {
186         switch (state) {
187         case DAEMON_INIT:
188                 return "STATUS=init";
189         case DAEMON_START:
190                 return "STATUS=startup";
191         case DAEMON_CONFIGURE:
192                 return "STATUS=configure";
193         case DAEMON_IDLE:
194         case DAEMON_RUNNING:
195                 return "STATUS=up";
196         case DAEMON_SHUTDOWN:
197                 return "STATUS=shutdown";
198         }
199         return NULL;
200 }
201
#ifdef USE_SYSTEMD
/*
 * Tell systemd about a daemon state transition.
 *
 * Checkerloop switches back and forth between idle and running state.
 * No need to tell systemd each time: both map to "STATUS=up", and
 * these notifications cause a lot of overhead on dbus, so such
 * transitions are suppressed.
 */
static void do_sd_notify(enum daemon_status old_state,
			 enum daemon_status new_state)
{
	bool old_up = (old_state == DAEMON_IDLE ||
		       old_state == DAEMON_RUNNING);
	bool new_up = (new_state == DAEMON_IDLE ||
		       new_state == DAEMON_RUNNING);

	if (old_up && new_up)
		return;

	sd_notify(0, sd_notify_status(new_state));
}
#endif
217
218 static void config_cleanup(__attribute__((unused)) void *arg)
219 {
220         pthread_mutex_unlock(&config_lock);
221 }
222
/*
 * If the current status is @oldstate, wait for at most @ms milliseconds
 * for the state to change, and return the new state, which may still be
 * @oldstate.
 *
 * The cleanup handler guarantees config_lock is released even if this
 * thread is cancelled inside pthread_cond_timedwait().
 * NOTE(review): the CLOCK_MONOTONIC timestamp assumes config_cond was
 * created with a monotonic clock attribute (done elsewhere) — confirm.
 */
enum daemon_status wait_for_state_change_if(enum daemon_status oldstate,
					    unsigned long ms)
{
	enum daemon_status st;
	struct timespec tmo;

	/* shutdown is terminal: no point in waiting for a change */
	if (oldstate == DAEMON_SHUTDOWN)
		return DAEMON_SHUTDOWN;

	pthread_mutex_lock(&config_lock);
	pthread_cleanup_push(config_cleanup, NULL);
	st = running_state;
	if (st == oldstate && clock_gettime(CLOCK_MONOTONIC, &tmo) == 0) {
		/* normalize_timespec() fixes up any tv_nsec overflow */
		tmo.tv_nsec += ms * 1000 * 1000;
		normalize_timespec(&tmo);
		(void)pthread_cond_timedwait(&config_cond, &config_lock, &tmo);
		st = running_state;
	}
	/* pop(1) runs config_cleanup(), releasing the lock */
	pthread_cleanup_pop(1);
	return st;
}
249
/* must be called with config_lock held */
/*
 * Set running_state to @state and wake all waiters on config_cond.
 * No-op if the state is unchanged or we are already shutting down
 * (DAEMON_SHUTDOWN is terminal).
 */
static void __post_config_state(enum daemon_status state)
{
	if (state != running_state && running_state != DAEMON_SHUTDOWN) {
		enum daemon_status old_state = running_state;

		running_state = state;
		pthread_cond_broadcast(&config_cond);
#ifdef USE_SYSTEMD
		do_sd_notify(old_state, state);
#endif
	}
}
263
/*
 * Locked wrapper around __post_config_state(); the cleanup handler
 * releases config_lock even if the thread is cancelled.
 */
void post_config_state(enum daemon_status state)
{
	pthread_mutex_lock(&config_lock);
	pthread_cleanup_push(config_cleanup, NULL);
	__post_config_state(state);
	/* pop(1) runs config_cleanup(), releasing the lock */
	pthread_cleanup_pop(1);
}
271
/*
 * Try to move the daemon to @state.
 *
 * Returns 0 on success (or if the state was already @state),
 * EINVAL if the daemon is shutting down, or the pthread_cond_timedwait()
 * error (e.g. ETIMEDOUT) if the daemon did not become idle within one
 * second.  A successful wait re-checks for shutdown before switching.
 */
int set_config_state(enum daemon_status state)
{
	int rc = 0;

	pthread_cleanup_push(config_cleanup, NULL);
	pthread_mutex_lock(&config_lock);
	if (running_state != state) {
		enum daemon_status old_state = running_state;

		if (running_state == DAEMON_SHUTDOWN)
			rc = EINVAL;
		else if (running_state != DAEMON_IDLE) {
			struct timespec ts;

			/* wait at most 1s for the daemon to become idle */
			get_monotonic_time(&ts);
			ts.tv_sec += 1;
			rc = pthread_cond_timedwait(&config_cond,
						    &config_lock, &ts);
		}
		if (!rc && (running_state != DAEMON_SHUTDOWN)) {
			running_state = state;
			pthread_cond_broadcast(&config_cond);
#ifdef USE_SYSTEMD
			do_sd_notify(old_state, state);
#endif
		}
	}
	/* pop(1) runs config_cleanup(), releasing the lock */
	pthread_cleanup_pop(1);
	return rc;
}
302
/*
 * Return the current configuration under an RCU read lock.
 * The caller MUST release it with put_multipath_config() when done;
 * this function intentionally returns with the RCU read lock held.
 */
struct config *get_multipath_config(void)
{
	rcu_read_lock();
	return rcu_dereference(multipath_conf);
}
308
/*
 * Release the RCU read lock taken by get_multipath_config().
 * The argument is ignored; it exists so the function can be used
 * directly as a pthread cleanup handler.
 */
void put_multipath_config(__attribute__((unused)) void *arg)
{
	rcu_read_unlock();
}
313
/*
 * Decide whether @mpp should switch to a different path group.
 * With @refresh set, re-read every path's priority first.
 * Returns 1 if the best path group differs from the group the kernel
 * will use next, 0 otherwise (also for manual failback or no paths).
 */
static int
need_switch_pathgroup (struct multipath * mpp, int refresh)
{
	struct pathgroup * pgp;
	struct path * pp;
	unsigned int i, j;
	struct config *conf;
	int bestpg;

	if (!mpp)
		return 0;

	/*
	 * Refresh path priority values
	 */
	if (refresh) {
		vector_foreach_slot (mpp->pg, pgp, i) {
			vector_foreach_slot (pgp->paths, pp, j) {
				conf = get_multipath_config();
				/* ensure the config is put back even if
				 * pathinfo() gets cancelled */
				pthread_cleanup_push(put_multipath_config,
						     conf);
				pathinfo(pp, conf, DI_PRIO);
				pthread_cleanup_pop(1);
			}
		}
	}

	if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
		return 0;

	/* NOTE(review): select_path_group() runs before the manual-failback
	 * check and its result is discarded in that case — presumably wanted
	 * for its side effects; confirm before reordering. */
	bestpg = select_path_group(mpp);
	if (mpp->pgfailback == -FAILBACK_MANUAL)
		return 0;

	mpp->bestpg = bestpg;
	if (mpp->bestpg != mpp->nextpg)
		return 1;

	return 0;
}
354
/* Ask the kernel to switch @mpp to its best path group (mpp->bestpg). */
static void
switch_pathgroup (struct multipath * mpp)
{
	mpp->stat_switchgroup++;
	dm_switchgroup(mpp->alias, mpp->bestpg);
	condlog(2, "%s: switch to path group #%i",
		 mpp->alias, mpp->bestpg);
}
363
364 static int
365 wait_for_events(struct multipath *mpp, struct vectors *vecs)
366 {
367         if (poll_dmevents)
368                 return watch_dmevents(mpp->alias);
369         else
370                 return start_waiter_thread(mpp, vecs);
371 }
372
/*
 * Drop @mpp from multipathd's internal tables (PURGE_VEC) and stop its
 * waiter thread if one was running.
 */
static void
remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
{
	/* devices are automatically removed by the dmevent polling code,
	 * so they don't need to be manually removed here */
	condlog(3, "%s: removing map from internal tables", mpp->alias);
	if (!poll_dmevents)
		stop_waiter_thread(mpp);
	remove_map(mpp, vecs, PURGE_VEC);
}
383
384 static void
385 remove_maps_and_stop_waiters(struct vectors *vecs)
386 {
387         int i;
388         struct multipath * mpp;
389
390         if (!vecs)
391                 return;
392
393         if (!poll_dmevents) {
394                 vector_foreach_slot(vecs->mpvec, mpp, i)
395                         stop_waiter_thread(mpp);
396         }
397         else
398                 unwatch_all_dmevents();
399
400         remove_maps(vecs);
401 }
402
403 static void
404 set_multipath_wwid (struct multipath * mpp)
405 {
406         if (strlen(mpp->wwid))
407                 return;
408
409         dm_get_uuid(mpp->alias, mpp->wwid, WWID_SIZE);
410 }
411
412 static void set_no_path_retry(struct multipath *mpp)
413 {
414         char is_queueing = 0;
415
416         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
417         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
418                 is_queueing = 1;
419
420         switch (mpp->no_path_retry) {
421         case NO_PATH_RETRY_UNDEF:
422                 break;
423         case NO_PATH_RETRY_FAIL:
424                 if (is_queueing)
425                         dm_queue_if_no_path(mpp->alias, 0);
426                 break;
427         case NO_PATH_RETRY_QUEUE:
428                 if (!is_queueing)
429                         dm_queue_if_no_path(mpp->alias, 1);
430                 break;
431         default:
432                 if (mpp->nr_active > 0) {
433                         mpp->retry_tick = 0;
434                         if (!is_queueing)
435                                 dm_queue_if_no_path(mpp->alias, 1);
436                 } else if (is_queueing && mpp->retry_tick == 0)
437                         enter_recovery_mode(mpp);
438                 break;
439         }
440 }
441
/*
 * Refresh multipathd's view of @mpp from device-mapper (info, table and
 * status strings).  With @reset set, queueing state and deferred-remove
 * state are re-evaluated as well.
 * Returns 0 on success; on failure the map is removed from the internal
 * tables (mpp is freed!) and 1 is returned.
 */
int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
		      int reset)
{
	if (dm_get_info(mpp->alias, &mpp->dmi)) {
		/* Error accessing table */
		condlog(3, "%s: cannot access table", mpp->alias);
		goto out;
	}

	if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
		condlog(0, "%s: failed to setup multipath", mpp->alias);
		goto out;
	}

	if (reset) {
		set_no_path_retry(mpp);
		/* paths exist again: abort any pending deferred removal */
		if (VECTOR_SIZE(mpp->paths) != 0)
			dm_cancel_deferred_remove(mpp);
	}

	return 0;
out:
	remove_map_and_stop_waiter(mpp, vecs);
	return 1;
}
467
/*
 * Refresh the map named @mapname and mark paths the kernel reports as
 * failed (dmstate PSTATE_FAILED) as PATH_DOWN in the checker's view.
 * Returns 0 on success, 1 if the map setup failed (map freed),
 * 2 if no map with that alias is known.
 */
int update_multipath (struct vectors *vecs, char *mapname, int reset)
{
	struct multipath *mpp;
	struct pathgroup  *pgp;
	struct path *pp;
	int i, j;

	mpp = find_mp_by_alias(vecs->mpvec, mapname);

	if (!mpp) {
		condlog(3, "%s: multipath map not found", mapname);
		return 2;
	}

	if (__setup_multipath(vecs, mpp, reset))
		return 1; /* mpp freed in setup_multipath */

	/*
	 * compare checkers states with DM states
	 */
	vector_foreach_slot (mpp->pg, pgp, i) {
		vector_foreach_slot (pgp->paths, pp, j) {
			if (pp->dmstate != PSTATE_FAILED)
				continue;

			/* kernel says failed, checker still thinks it's up */
			if (pp->state != PATH_DOWN) {
				struct config *conf;
				int oldstate = pp->state;
				unsigned int checkint;

				conf = get_multipath_config();
				checkint = conf->checkint;
				put_multipath_config(conf);
				condlog(2, "%s: mark as failed", pp->dev);
				mpp->stat_path_failures++;
				pp->state = PATH_DOWN;
				if (oldstate == PATH_UP ||
				    oldstate == PATH_GHOST)
					update_queue_mode_del_path(mpp);

				/*
				 * if opportune,
				 * schedule the next check earlier
				 */
				if (pp->tick > checkint)
					pp->tick = checkint;
			}
		}
	}
	return 0;
}
519
520 static int
521 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
522 {
523         int retries = 3;
524         char params[PARAMS_SIZE] = {0};
525
526 retry:
527         condlog(4, "%s: updating new map", mpp->alias);
528         if (adopt_paths(vecs->pathvec, mpp)) {
529                 condlog(0, "%s: failed to adopt paths for new map update",
530                         mpp->alias);
531                 retries = -1;
532                 goto fail;
533         }
534         verify_paths(mpp, vecs);
535         mpp->action = ACT_RELOAD;
536
537         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
538                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
539                 retries = -1;
540                 goto fail;
541         }
542         if (domap(mpp, params, 1) == DOMAP_FAIL && retries-- > 0) {
543                 condlog(0, "%s: map_udate sleep", mpp->alias);
544                 sleep(1);
545                 goto retry;
546         }
547         dm_lib_release();
548
549 fail:
550         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
551                 condlog(0, "%s: failed to create new map", mpp->alias);
552                 remove_map(mpp, vecs, 1);
553                 return 1;
554         }
555
556         if (setup_multipath(vecs, mpp))
557                 return 1;
558
559         sync_map_state(mpp);
560
561         if (retries < 0)
562                 condlog(0, "%s: failed reload in new map update", mpp->alias);
563         return 0;
564 }
565
/*
 * Register an already-existing kernel map named @alias with multipathd
 * (used when a map appears that was not created through uev_add_path).
 * Returns the new struct multipath on success, NULL on failure (in
 * which case the map has been cleaned up / removed from mpvec again).
 */
static struct multipath *
add_map_without_path (struct vectors *vecs, const char *alias)
{
	struct multipath * mpp = alloc_multipath();
	struct config *conf;

	if (!mpp)
		return NULL;
	if (!alias) {
		FREE(mpp);
		return NULL;
	}

	/* NOTE(review): STRDUP result is not checked for NULL here;
	 * subsequent dm_get_info(NULL, ...) behavior should be confirmed */
	mpp->alias = STRDUP(alias);

	if (dm_get_info(mpp->alias, &mpp->dmi)) {
		condlog(3, "%s: cannot access table", mpp->alias);
		goto out;
	}
	set_multipath_wwid(mpp);
	conf = get_multipath_config();
	mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
	put_multipath_config(conf);

	if (update_multipath_table(mpp, vecs->pathvec, 1))
		goto out;
	if (update_multipath_status(mpp))
		goto out;

	if (!vector_alloc_slot(vecs->mpvec))
		goto out;

	vector_set_slot(vecs->mpvec, mpp);

	if (update_map(mpp, vecs, 1) != 0) /* map removed */
		return NULL;

	return mpp;
out:
	remove_map(mpp, vecs, PURGE_VEC);
	return NULL;
}
608
/*
 * Reconcile the existing maps in vecs->mpvec with the freshly built map
 * vector @nmpv: maps no longer allowed by the configuration are flushed
 * (or, if busy, moved over to @nmpv), and surviving maps are optionally
 * reassigned.  Returns 1 on allocation failure, 0 otherwise.
 */
static int
coalesce_maps(struct vectors *vecs, vector nmpv)
{
	struct multipath * ompp;
	vector ompv = vecs->mpvec;
	unsigned int i, reassign_maps;
	struct config *conf;

	conf = get_multipath_config();
	reassign_maps = conf->reassign_maps;
	put_multipath_config(conf);
	vector_foreach_slot (ompv, ompp, i) {
		condlog(3, "%s: coalesce map", ompp->alias);
		if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
			/*
			 * remove all current maps not allowed by the
			 * current configuration
			 */
			if (dm_flush_map(ompp->alias)) {
				condlog(0, "%s: unable to flush devmap",
					ompp->alias);
				/*
				 * may be just because the device is open
				 */
				if (setup_multipath(vecs, ompp) != 0) {
					/* map was removed from ompv by
					 * setup_multipath; compensate the
					 * loop increment (unsigned wrap at
					 * i == 0 is well-defined and undone
					 * by the following ++) */
					i--;
					continue;
				}
				if (!vector_alloc_slot(nmpv))
					return 1;

				/* keep the busy map: move it to the new
				 * vector instead of dropping it */
				vector_set_slot(nmpv, ompp);

				vector_del_slot(ompv, i);
				i--;
			}
			else {
				dm_lib_release();
				condlog(2, "%s devmap removed", ompp->alias);
			}
		} else if (reassign_maps) {
			condlog(3, "%s: Reassign existing device-mapper"
				" devices", ompp->alias);
			dm_reassign(ompp->alias);
		}
	}
	return 0;
}
657
658 static void
659 sync_maps_state(vector mpvec)
660 {
661         unsigned int i;
662         struct multipath *mpp;
663
664         vector_foreach_slot (mpvec, mpp, i)
665                 sync_map_state(mpp);
666 }
667
/*
 * Flush (remove) the kernel map for @mpp and drop it from the internal
 * tables; its paths are orphaned.  With @nopaths set, the flush is only
 * attempted if the map has no usable paths (possibly deferring removal).
 * Returns 0 on success, otherwise the dm_flush_map* result.
 */
static int
flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
{
	int r;

	if (nopaths)
		r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
	else
		r = dm_flush_map(mpp->alias);
	/*
	 * On success, references to this map are cleared below
	 * (remove_map_and_stop_waiter) so the spurious uevent generated
	 * by the flush above can be ignored.
	 */
	if (r) {
		/*
		 * May not really be an error -- if the map was already flushed
		 * from the device mapper by dmsetup(8) for instance.
		 */
		if (r == 1)
			condlog(0, "%s: can't flush", mpp->alias);
		else {
			/* flush deferred until the device is released */
			condlog(2, "%s: devmap deferred remove", mpp->alias);
			mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
		}
		return r;
	}
	else {
		dm_lib_release();
		condlog(2, "%s: map flushed", mpp->alias);
	}

	orphan_paths(vecs->pathvec, mpp, "map flushed");
	remove_map_and_stop_waiter(mpp, vecs);

	return 0;
}
704
/*
 * Handle an "add" uevent for a device-mapper map: resolve the map name
 * (from DM_NAME, or from major:minor as a fallback) and hand off to
 * ev_add_map() under the vecs lock.
 * Returns ev_add_map()'s status, or 1 if no name could be resolved.
 */
static int
uev_add_map (struct uevent * uev, struct vectors * vecs)
{
	char *alias;
	int major = -1, minor = -1, rc;

	condlog(3, "%s: add map (uevent)", uev->kernel);
	alias = uevent_get_dm_name(uev);
	if (!alias) {
		condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
		major = uevent_get_major(uev);
		minor = uevent_get_minor(uev);
		alias = dm_mapname(major, minor);
		if (!alias) {
			condlog(2, "%s: mapname not found for %d:%d",
				uev->kernel, major, minor);
			return 1;
		}
	}
	/* cleanup handler unlocks vecs->lock even on cancellation */
	pthread_cleanup_push(cleanup_lock, &vecs->lock);
	lock(&vecs->lock);
	pthread_testcancel();
	rc = ev_add_map(uev->kernel, alias, vecs);
	lock_cleanup_pop(vecs->lock);
	/* alias was allocated by uevent_get_dm_name()/dm_mapname() */
	FREE(alias);
	return rc;
}
732
/*
 * ev_add_map expects that the multipath device already exists in kernel
 * before it is called. It just adds a device to multipathd or updates an
 * existing device.
 *
 * Returns 0 on success (including "nothing to do"), 1 on failure or if
 * the map was removed during a delayed update.
 */
int
ev_add_map (char * dev, const char * alias, struct vectors * vecs)
{
	struct multipath * mpp;
	int delayed_reconfig, reassign_maps;
	struct config *conf;

	if (dm_is_mpath(alias) != 1) {
		condlog(4, "%s: not a multipath map", alias);
		return 0;
	}

	mpp = find_mp_by_alias(vecs->mpvec, alias);

	if (mpp) {
		/* known map: run any actions postponed while waiting
		 * for udev */
		if (mpp->wait_for_udev > 1) {
			condlog(2, "%s: performing delayed actions",
				mpp->alias);
			if (update_map(mpp, vecs, 0))
				/* setup multipathd removed the map */
				return 1;
		}
		conf = get_multipath_config();
		delayed_reconfig = conf->delayed_reconfig;
		reassign_maps = conf->reassign_maps;
		put_multipath_config(conf);
		if (mpp->wait_for_udev) {
			mpp->wait_for_udev = 0;
			if (delayed_reconfig &&
			    !need_to_delay_reconfig(vecs)) {
				condlog(2, "reconfigure (delayed)");
				set_config_state(DAEMON_CONFIGURE);
				return 0;
			}
		}
		/*
		 * Not really an error -- we generate our own uevent
		 * if we create a multipath mapped device as a result
		 * of uev_add_path
		 */
		if (reassign_maps) {
			condlog(3, "%s: Reassign existing device-mapper devices",
				alias);
			dm_reassign(alias);
		}
		return 0;
	}
	condlog(2, "%s: adding map", alias);

	/*
	 * now we can register the map
	 */
	if ((mpp = add_map_without_path(vecs, alias))) {
		sync_map_state(mpp);
		condlog(2, "%s: devmap %s registered", alias, dev);
		return 0;
	} else {
		condlog(2, "%s: ev_add_map failed", dev);
		return 1;
	}
}
799
800 static int
801 uev_remove_map (struct uevent * uev, struct vectors * vecs)
802 {
803         char *alias;
804         int minor;
805         struct multipath *mpp;
806
807         condlog(3, "%s: remove map (uevent)", uev->kernel);
808         alias = uevent_get_dm_name(uev);
809         if (!alias) {
810                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
811                 return 0;
812         }
813         minor = uevent_get_minor(uev);
814
815         pthread_cleanup_push(cleanup_lock, &vecs->lock);
816         lock(&vecs->lock);
817         pthread_testcancel();
818         mpp = find_mp_by_minor(vecs->mpvec, minor);
819
820         if (!mpp) {
821                 condlog(2, "%s: devmap not registered, can't remove",
822                         uev->kernel);
823                 goto out;
824         }
825         if (strcmp(mpp->alias, alias)) {
826                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\")",
827                         uev->kernel, mpp->alias, alias);
828                 goto out;
829         }
830
831         remove_map_and_stop_waiter(mpp, vecs);
832 out:
833         lock_cleanup_pop(vecs->lock);
834         FREE(alias);
835         return 0;
836 }
837
838 /* Called from CLI handler */
839 int
840 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
841 {
842         struct multipath * mpp;
843
844         mpp = find_mp_by_minor(vecs->mpvec, minor);
845
846         if (!mpp) {
847                 condlog(2, "%s: devmap not registered, can't remove",
848                         devname);
849                 return 1;
850         }
851         if (strcmp(mpp->alias, alias)) {
852                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
853                         mpp->alias, mpp->dmi->minor, minor);
854                 return 1;
855         }
856         return flush_map(mpp, vecs, 0);
857 }
858
/*
 * Handle an "add" uevent for a path device.  Under the vecs lock:
 * a path already in pathvec is re-initialized if it has no map and no
 * WWID yet (and dropped again if now blacklisted); a new path is
 * allocated, probed and stored, then handed to ev_add_path().
 * Returns 0 on success or when the path is skipped/blacklisted,
 * 1 on failure.
 */
static int
uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
{
	struct path *pp;
	int ret = 0, i;
	struct config *conf;

	condlog(3, "%s: add path (uevent)", uev->kernel);
	if (strstr(uev->kernel, "..") != NULL) {
		/*
		 * Don't allow relative device names in the pathvec
		 */
		condlog(0, "%s: path name is invalid", uev->kernel);
		return 1;
	}

	/* cleanup handler unlocks vecs->lock even on cancellation */
	pthread_cleanup_push(cleanup_lock, &vecs->lock);
	lock(&vecs->lock);
	pthread_testcancel();
	pp = find_path_by_dev(vecs->pathvec, uev->kernel);
	if (pp) {
		int r;

		condlog(3, "%s: spurious uevent, path already in pathvec",
			uev->kernel);
		/* only re-probe paths that never got a map or a WWID */
		if (!pp->mpp && !strlen(pp->wwid)) {
			condlog(3, "%s: reinitialize path", uev->kernel);
			/* swap in the udev device from the new event */
			udev_device_unref(pp->udev);
			pp->udev = udev_device_ref(uev->udev);
			conf = get_multipath_config();
			pthread_cleanup_push(put_multipath_config, conf);
			r = pathinfo(pp, conf,
				     DI_ALL | DI_BLACKLIST);
			pthread_cleanup_pop(1);
			if (r == PATHINFO_OK)
				ret = ev_add_path(pp, vecs, need_do_map);
			else if (r == PATHINFO_SKIPPED) {
				condlog(3, "%s: remove blacklisted path",
					uev->kernel);
				i = find_slot(vecs->pathvec, (void *)pp);
				if (i != -1)
					vector_del_slot(vecs->pathvec, i);
				free_path(pp);
			} else {
				condlog(0, "%s: failed to reinitialize path",
					uev->kernel);
				ret = 1;
			}
		}
	}
	if (pp)
		goto out;

	/*
	 * get path vital state
	 */
	conf = get_multipath_config();
	pthread_cleanup_push(put_multipath_config, conf);
	ret = alloc_path_with_pathinfo(conf, uev->udev,
				       uev->wwid, DI_ALL, &pp);
	pthread_cleanup_pop(1);
	if (!pp) {
		/* blacklisted paths are silently skipped, not errors */
		if (ret == PATHINFO_SKIPPED)
			ret = 0;
		else {
			condlog(3, "%s: failed to get path info", uev->kernel);
			ret = 1;
		}
		goto out;
	}
	ret = store_path(vecs->pathvec, pp);
	if (!ret) {
		conf = get_multipath_config();
		pp->checkint = conf->checkint;
		put_multipath_config(conf);
		ret = ev_add_path(pp, vecs, need_do_map);
	} else {
		condlog(0, "%s: failed to store path info, "
			"dropping event",
			uev->kernel);
		free_path(pp);
		ret = 1;
	}
out:
	lock_cleanup_pop(vecs->lock);
	return ret;
}
946
947 /*
948  * returns:
949  * 0: added
950  * 1: error
951  */
/*
 * Add path pp (already stored in vecs->pathvec by the caller) to its
 * multipath map, creating the map if none exists for pp's WWID.
 * If need_do_map is 0, only the in-memory state is updated; the
 * device-mapper table push is deferred to a later merged event.
 * Must be called with vecs->lock held.
 */
int
ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
{
	struct multipath * mpp;
	char params[PARAMS_SIZE] = {0};
	int retries = 3;
	int start_waiter = 0;
	int ret;

	/*
	 * need path UID to go any further
	 */
	if (strlen(pp->wwid) == 0) {
		condlog(0, "%s: failed to get path uid", pp->dev);
		goto fail; /* leave path added to pathvec */
	}
	mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
	/* refuse to mix devices of different size in one map */
	if (mpp && pp->size && mpp->size != pp->size) {
		condlog(0, "%s: failed to add new path %s, device size mismatch", mpp->alias, pp->dev);
		int i = find_slot(vecs->pathvec, (void *)pp);
		if (i != -1)
			vector_del_slot(vecs->pathvec, i);
		free_path(pp);
		return 1;
	}
	if (mpp && mpp->wait_for_udev &&
	    (pathcount(mpp, PATH_UP) > 0 ||
	     (pathcount(mpp, PATH_GHOST) > 0 &&
	      path_get_tpgs(pp) != TPGS_IMPLICIT &&
	      mpp->ghost_delay_tick <= 0))) {
		/* if wait_for_udev is set and valid paths exist */
		condlog(3, "%s: delaying path addition until %s is fully initialized",
			pp->dev, mpp->alias);
		mpp->wait_for_udev = 2;
		orphan_path(pp, "waiting for create to complete");
		return 0;
	}

	pp->mpp = mpp;
rescan:
	/* re-entered below if an asynchronous uevent made domap retry */
	if (mpp) {
		condlog(4,"%s: adopting all paths for path %s",
			mpp->alias, pp->dev);
		if (adopt_paths(vecs->pathvec, mpp))
			goto fail; /* leave path added to pathvec */

		verify_paths(mpp, vecs);
		mpp->action = ACT_RELOAD;
	} else {
		if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
			orphan_path(pp, "only one path");
			return 0;
		}
		condlog(4,"%s: creating new map", pp->dev);
		if ((mpp = add_map_with_path(vecs, pp, 1))) {
			mpp->action = ACT_CREATE;
			/*
			 * We don't depend on ACT_CREATE, as domap will
			 * set it to ACT_NOTHING when complete.
			 */
			start_waiter = 1;
		}
		if (!start_waiter)
			goto fail; /* leave path added to pathvec */
	}

	/* persistent reservation check*/
	mpath_pr_event_handle(pp);

	/* in-memory update only; a later event will push the table */
	if (!need_do_map)
		return 0;

	/* map vanished from device-mapper since we last looked: recreate */
	if (!dm_map_present(mpp->alias)) {
		mpp->action = ACT_CREATE;
		start_waiter = 1;
	}
	/*
	 * push the map to the device-mapper
	 */
	if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
		condlog(0, "%s: failed to setup map for addition of new "
			"path %s", mpp->alias, pp->dev);
		goto fail_map;
	}
	/*
	 * reload the map for the multipath mapped device
	 */
	ret = domap(mpp, params, 1);
	/* retry transient DOMAP_RETRY results, up to `retries` times */
	while (ret == DOMAP_RETRY && retries-- > 0) {
		condlog(0, "%s: retry domap for addition of new "
			"path %s", mpp->alias, pp->dev);
		sleep(1);
		ret = domap(mpp, params, 1);
	}
	if (ret == DOMAP_FAIL || ret == DOMAP_RETRY) {
		condlog(0, "%s: failed in domap for addition of new "
			"path %s", mpp->alias, pp->dev);
		/*
		 * deal with asynchronous uevents :((
		 */
		if (mpp->action == ACT_RELOAD && retries-- > 0) {
			condlog(0, "%s: ev_add_path sleep", mpp->alias);
			sleep(1);
			update_mpp_paths(mpp, vecs->pathvec);
			goto rescan;
		}
		else if (mpp->action == ACT_RELOAD)
			condlog(0, "%s: giving up reload", mpp->alias);
		else
			goto fail_map;
	}
	dm_lib_release();

	/* start the event waiter for newly created maps */
	if ((mpp->action == ACT_CREATE ||
	     (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
	    wait_for_events(mpp, vecs))
			goto fail_map;

	/*
	 * update our state from kernel regardless of create or reload
	 */
	if (setup_multipath(vecs, mpp))
		goto fail; /* if setup_multipath fails, it removes the map */

	sync_map_state(mpp);

	/* retries < 0 means we exhausted the reload retries above */
	if (retries >= 0) {
		condlog(2, "%s [%s]: path added to devmap %s",
			pp->dev, pp->dev_t, mpp->alias);
		return 0;
	} else
		goto fail;

fail_map:
	remove_map(mpp, vecs, 1);
fail:
	orphan_path(pp, "failed to add path");
	return 1;
}
1091
1092 static int
1093 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1094 {
1095         struct path *pp;
1096         int ret;
1097
1098         condlog(3, "%s: remove path (uevent)", uev->kernel);
1099         delete_foreign(uev->udev);
1100
1101         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1102         lock(&vecs->lock);
1103         pthread_testcancel();
1104         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1105         if (pp)
1106                 ret = ev_remove_path(pp, vecs, need_do_map);
1107         lock_cleanup_pop(vecs->lock);
1108         if (!pp) {
1109                 /* Not an error; path might have been purged earlier */
1110                 condlog(0, "%s: path already removed", uev->kernel);
1111                 return 0;
1112         }
1113         return ret;
1114 }
1115
/*
 * Remove path pp from its map (reloading the map without it), or flush
 * the whole map if pp was its last path.  On success pp is removed from
 * vecs->pathvec and freed.  Must be called with vecs->lock held.
 * Returns 0 on success, 1 on failure.
 */
int
ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
{
	struct multipath * mpp;
	int i, retval = 0;
	char params[PARAMS_SIZE] = {0};

	/*
	 * avoid referring to the map of an orphaned path
	 */
	if ((mpp = pp->mpp)) {
		/*
		 * transform the mp->pg vector of vectors of paths
		 * into a mp->params string to feed the device-mapper
		 */
		if (update_mpp_paths(mpp, vecs->pathvec)) {
			condlog(0, "%s: failed to update paths",
				mpp->alias);
			goto fail;
		}

		/*
		 * Make sure mpp->hwe doesn't point to freed memory
		 * We call extract_hwe_from_path() below to restore mpp->hwe
		 */
		if (mpp->hwe == pp->hwe)
			mpp->hwe = NULL;

		/* detach pp from the map's own path list */
		if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
			vector_del_slot(mpp->paths, i);

		/*
		 * remove the map IF removing the last path
		 */
		if (VECTOR_SIZE(mpp->paths) == 0) {
			char alias[WWID_SIZE];

			/*
			 * flush_map will fail if the device is open
			 */
			/* copy the alias: flush_map frees mpp on success */
			strlcpy(alias, mpp->alias, WWID_SIZE);
			if (mpp->flush_on_last_del == FLUSH_ENABLED) {
				condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
				mpp->retry_tick = 0;
				mpp->no_path_retry = NO_PATH_RETRY_FAIL;
				mpp->disable_queueing = 1;
				mpp->stat_map_failures++;
				dm_queue_if_no_path(mpp->alias, 0);
			}
			if (!flush_map(mpp, vecs, 1)) {
				condlog(2, "%s: removed map after"
					" removing all paths",
					alias);
				retval = 0;
				goto out;
			}
			/*
			 * Not an error, continue
			 */
		}

		/* restore mpp->hwe cleared above, from a surviving path */
		if (mpp->hwe == NULL)
			extract_hwe_from_path(mpp);

		if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
			condlog(0, "%s: failed to setup map for"
				" removal of path %s", mpp->alias, pp->dev);
			goto fail;
		}

		/* creation uevent still pending: defer the actual reload */
		if (mpp->wait_for_udev) {
			mpp->wait_for_udev = 2;
			goto out;
		}

		if (!need_do_map)
			goto out;
		/*
		 * reload the map
		 */
		mpp->action = ACT_RELOAD;
		if (domap(mpp, params, 1) == DOMAP_FAIL) {
			condlog(0, "%s: failed in domap for "
				"removal of path %s",
				mpp->alias, pp->dev);
			retval = 1;
		} else {
			/*
			 * update our state from kernel
			 */
			/* NOTE(review): on this failure pp is not removed
			 * from pathvec; presumably handled when
			 * setup_multipath removes the map — confirm */
			if (setup_multipath(vecs, mpp))
				return 1;
			sync_map_state(mpp);

			condlog(2, "%s [%s]: path removed from map %s",
				pp->dev, pp->dev_t, mpp->alias);
		}
	}

out:
	/* drop pp from the global path vector and release it */
	if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
		vector_del_slot(vecs->pathvec, i);

	free_path(pp);

	return retval;

fail:
	remove_map_and_stop_waiter(mpp, vecs);
	return 1;
}
1227
1228 static int
1229 uev_update_path (struct uevent *uev, struct vectors * vecs)
1230 {
1231         int ro, retval = 0, rc;
1232         struct path * pp;
1233         struct config *conf;
1234         int needs_reinit = 0;
1235
1236         switch ((rc = change_foreign(uev->udev))) {
1237         case FOREIGN_OK:
1238                 /* known foreign path, ignore event */
1239                 return 0;
1240         case FOREIGN_IGNORED:
1241                 break;
1242         case FOREIGN_ERR:
1243                 condlog(3, "%s: error in change_foreign", __func__);
1244                 break;
1245         default:
1246                 condlog(1, "%s: return code %d of change_forein is unsupported",
1247                         __func__, rc);
1248                 break;
1249         }
1250
1251         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1252         lock(&vecs->lock);
1253         pthread_testcancel();
1254
1255         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1256         if (pp) {
1257                 struct multipath *mpp = pp->mpp;
1258                 char wwid[WWID_SIZE];
1259
1260                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1261                         needs_reinit = 1;
1262                         goto out;
1263                 }
1264                 /* Don't deal with other types of failed initialization
1265                  * now. check_path will handle it */
1266                 if (!strlen(pp->wwid))
1267                         goto out;
1268
1269                 strcpy(wwid, pp->wwid);
1270                 rc = get_uid(pp, pp->state, uev->udev, 0);
1271
1272                 if (rc != 0)
1273                         strcpy(pp->wwid, wwid);
1274                 else if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1275                         condlog(0, "%s: path wwid changed from '%s' to '%s'",
1276                                 uev->kernel, wwid, pp->wwid);
1277                         ev_remove_path(pp, vecs, 1);
1278                         needs_reinit = 1;
1279                         goto out;
1280                 } else {
1281                         udev_device_unref(pp->udev);
1282                         pp->udev = udev_device_ref(uev->udev);
1283                         conf = get_multipath_config();
1284                         pthread_cleanup_push(put_multipath_config, conf);
1285                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1286                                 condlog(1, "%s: pathinfo failed after change uevent",
1287                                         uev->kernel);
1288                         pthread_cleanup_pop(1);
1289                 }
1290
1291                 ro = uevent_get_disk_ro(uev);
1292                 if (mpp && ro >= 0) {
1293                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1294
1295                         if (mpp->wait_for_udev)
1296                                 mpp->wait_for_udev = 2;
1297                         else {
1298                                 if (ro == 1)
1299                                         pp->mpp->force_readonly = 1;
1300                                 retval = update_path_groups(mpp, vecs, 0);
1301                                 if (retval == 2)
1302                                         condlog(2, "%s: map removed during reload", pp->dev);
1303                                 else {
1304                                         pp->mpp->force_readonly = 0;
1305                                         condlog(2, "%s: map %s reloaded (retval %d)", uev->kernel, mpp->alias, retval);
1306                                 }
1307                         }
1308                 }
1309         }
1310 out:
1311         lock_cleanup_pop(vecs->lock);
1312         if (!pp) {
1313                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1314                 if (uev->udev) {
1315                         int flag = DI_SYSFS | DI_WWID;
1316
1317                         conf = get_multipath_config();
1318                         pthread_cleanup_push(put_multipath_config, conf);
1319                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1320                         pthread_cleanup_pop(1);
1321
1322                         if (retval == PATHINFO_SKIPPED) {
1323                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1324                                 return 0;
1325                         }
1326                 }
1327
1328                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1329         }
1330         if (needs_reinit)
1331                 retval = uev_add_path(uev, vecs, 1);
1332         return retval;
1333 }
1334
/*
 * Inspect a dm uevent for a kernel-side PATH_FAILED notification and,
 * if the failed path is known, feed it to the io_err_stat accounting.
 * Returns 0 when the pathfail was handled, 1 otherwise.
 */
static int
uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
{
	char *action = NULL, *devt = NULL;
	struct path *pp;
	int r = 1;

	/* DM_ACTION must be present and be "PATH_FAILED" */
	action = uevent_get_dm_action(uev);
	if (!action)
		return 1;
	if (strncmp(action, "PATH_FAILED", 11))
		goto out;
	devt = uevent_get_dm_path(uev);
	if (!devt) {
		condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
		goto out;
	}

	pthread_cleanup_push(cleanup_lock, &vecs->lock);
	lock(&vecs->lock);
	pthread_testcancel();
	pp = find_path_by_devt(vecs->pathvec, devt);
	if (!pp)
		goto out_lock;
	r = io_err_stat_handle_pathfail(pp);
	if (r)
		condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
				pp->dev);
out_lock:
	lock_cleanup_pop(vecs->lock);
	/* devt was only allocated once the action matched */
	FREE(devt);
	FREE(action);
	return r;
out:
	/* early exit: devt not yet allocated here */
	FREE(action);
	return 1;
}
1372
1373 static int
1374 map_discovery (struct vectors * vecs)
1375 {
1376         struct multipath * mpp;
1377         unsigned int i;
1378
1379         if (dm_get_maps(vecs->mpvec))
1380                 return 1;
1381
1382         vector_foreach_slot (vecs->mpvec, mpp, i)
1383                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1384                     update_multipath_status(mpp)) {
1385                         remove_map(mpp, vecs, 1);
1386                         i--;
1387                 }
1388
1389         return 0;
1390 }
1391
1392 int
1393 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1394                 void * trigger_data)
1395 {
1396         struct vectors * vecs;
1397         int r;
1398
1399         *reply = NULL;
1400         *len = 0;
1401         vecs = (struct vectors *)trigger_data;
1402
1403         if ((str != NULL) && (is_root == false) &&
1404             (strncmp(str, "list", strlen("list")) != 0) &&
1405             (strncmp(str, "show", strlen("show")) != 0)) {
1406                 *reply = STRDUP("permission deny: need to be root");
1407                 if (*reply)
1408                         *len = strlen(*reply) + 1;
1409                 return 1;
1410         }
1411
1412         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1413
1414         if (r > 0) {
1415                 if (r == ETIMEDOUT)
1416                         *reply = STRDUP("timeout\n");
1417                 else
1418                         *reply = STRDUP("fail\n");
1419                 if (*reply)
1420                         *len = strlen(*reply) + 1;
1421                 r = 1;
1422         }
1423         else if (!r && *len == 0) {
1424                 *reply = STRDUP("ok\n");
1425                 if (*reply)
1426                         *len = strlen(*reply) + 1;
1427                 r = 0;
1428         }
1429         /* else if (r < 0) leave *reply alone */
1430
1431         return r;
1432 }
1433
/*
 * Central uevent dispatcher, called by the uevent queue thread for each
 * (possibly merged) uevent.  Waits until the daemon is in a state that
 * can process events, then routes dm-* map events and path
 * add/remove/change events to their handlers.  Returns the accumulated
 * handler result (0 on success).
 */
int
uev_trigger (struct uevent * uev, void * trigger_data)
{
	int r = 0;
	struct vectors * vecs;
	struct uevent *merge_uev, *tmp;
	enum daemon_status state;

	vecs = (struct vectors *)trigger_data;

	/* block until the daemon is idle, running, or shutting down */
	pthread_cleanup_push(config_cleanup, NULL);
	pthread_mutex_lock(&config_lock);
	while (running_state != DAEMON_IDLE &&
	       running_state != DAEMON_RUNNING &&
	       running_state != DAEMON_SHUTDOWN)
		pthread_cond_wait(&config_cond, &config_lock);
	state = running_state;
	pthread_cleanup_pop(1);

	if (state == DAEMON_SHUTDOWN)
		return 0;

	/*
	 * device map event
	 * Add events are ignored here as the tables
	 * are not fully initialised then.
	 */
	if (!strncmp(uev->kernel, "dm-", 3)) {
		/* non-multipath dm devices are handed to the foreign code */
		if (!uevent_is_mpath(uev)) {
			if (!strncmp(uev->action, "change", 6))
				(void)add_foreign(uev->udev);
			else if (!strncmp(uev->action, "remove", 6))
				(void)delete_foreign(uev->udev);
			goto out;
		}
		if (!strncmp(uev->action, "change", 6)) {
			r = uev_add_map(uev, vecs);

			/*
			 * The kernel-side dm-mpath issues a PATH_FAILED
			 * event when it encounters a path IO error, so a
			 * map change event is the entry point for the
			 * path IO error accounting process.
			 */
			uev_pathfail_check(uev, vecs);
		} else if (!strncmp(uev->action, "remove", 6)) {
			r = uev_remove_map(uev, vecs);
		}
		goto out;
	}

	/*
	 * path add/remove/change event, add/remove maybe merged
	 */
	/* merged events update state only (need_do_map = 0) ... */
	list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
		if (!strncmp(merge_uev->action, "add", 3))
			r += uev_add_path(merge_uev, vecs, 0);
		if (!strncmp(merge_uev->action, "remove", 6))
			r += uev_remove_path(merge_uev, vecs, 0);
	}

	/* ... the final event also pushes the map (need_do_map = 1) */
	if (!strncmp(uev->action, "add", 3))
		r += uev_add_path(uev, vecs, 1);
	if (!strncmp(uev->action, "remove", 6))
		r += uev_remove_path(uev, vecs, 1);
	if (!strncmp(uev->action, "change", 6))
		r += uev_update_path(uev, vecs);

out:
	return r;
}
1505
/* pthread cleanup handler: detach the calling thread from liburcu */
static void rcu_unregister(__attribute__((unused)) void *param)
{
	rcu_unregister_thread();
}
1510
/*
 * Thread entry point: run the netlink uevent listener until it exits.
 * Registers with liburcu for the thread's lifetime via the cleanup
 * handler.
 */
static void *
ueventloop (void * ap)
{
	struct udev *udev = ap;

	pthread_cleanup_push(rcu_unregister, NULL);
	rcu_register_thread();
	if (uevent_listen(udev))
		condlog(0, "error starting uevent listener");
	pthread_cleanup_pop(1);
	return NULL;
}
1523
/*
 * Thread entry point: drain the queued uevents, passing each to
 * uev_trigger().  Registers with liburcu for the thread's lifetime.
 */
static void *
uevqloop (void * ap)
{
	pthread_cleanup_push(rcu_unregister, NULL);
	rcu_register_thread();
	if (uevent_dispatch(&uev_trigger, ap))
		condlog(0, "error starting uevent dispatcher");
	pthread_cleanup_pop(1);
	return NULL;
}
/*
 * Thread entry point for the CLI listener: create the unix socket,
 * register all CLI command handlers, then serve client requests via
 * uxsock_listen() until cancelled.  Signals the main thread that
 * startup may proceed by posting DAEMON_CONFIGURE.
 */
static void *
uxlsnrloop (void * ap)
{
	long ux_sock;

	pthread_cleanup_push(rcu_unregister, NULL);
	rcu_register_thread();

	ux_sock = ux_socket_listen(DEFAULT_SOCKET);
	if (ux_sock == -1) {
		condlog(1, "could not create uxsock: %d", errno);
		exit_daemon();
		goto out;
	}
	pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);

	if (cli_init()) {
		condlog(1, "Failed to init uxsock listener");
		exit_daemon();
		goto out_sock;
	}

	/* Tell main thread that thread has started */
	post_config_state(DAEMON_CONFIGURE);

	/* "list"/"show" handlers */
	set_handler_callback(LIST+PATHS, cli_list_paths);
	set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
	set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
	set_handler_callback(LIST+PATH, cli_list_path);
	set_handler_callback(LIST+MAPS, cli_list_maps);
	set_handler_callback(LIST+STATUS, cli_list_status);
	set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
	set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
	set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
	set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
	set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
	set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
	set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
	set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
	set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
	set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
	set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
	set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
	set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
	set_handler_callback(LIST+CONFIG, cli_list_config);
	set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
	set_handler_callback(LIST+DEVICES, cli_list_devices);
	set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
	/* state-changing handlers (root only, see uxsock_trigger) */
	set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
	set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
	set_handler_callback(ADD+PATH, cli_add_path);
	set_handler_callback(DEL+PATH, cli_del_path);
	set_handler_callback(ADD+MAP, cli_add_map);
	set_handler_callback(DEL+MAP, cli_del_map);
	set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
	set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
	set_handler_callback(SUSPEND+MAP, cli_suspend);
	set_handler_callback(RESUME+MAP, cli_resume);
	set_handler_callback(RESIZE+MAP, cli_resize);
	set_handler_callback(RELOAD+MAP, cli_reload);
	set_handler_callback(RESET+MAP, cli_reassign);
	set_handler_callback(REINSTATE+PATH, cli_reinstate);
	set_handler_callback(FAIL+PATH, cli_fail);
	set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
	set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
	set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
	set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
	set_unlocked_handler_callback(QUIT, cli_quit);
	set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
	/* persistent reservation handlers */
	set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
	set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
	set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
	set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
	set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
	set_handler_callback(GETPRKEY+MAP, cli_getprkey);
	set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
	set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
	set_handler_callback(SETMARGINAL+PATH, cli_set_marginal);
	set_handler_callback(UNSETMARGINAL+PATH, cli_unset_marginal);
	set_handler_callback(UNSETMARGINAL+MAP, cli_unset_all_marginal);

	umask(077);
	/* serve client connections until thread cancellation */
	uxsock_listen(&uxsock_trigger, ux_sock, ap);

out_sock:
	pthread_cleanup_pop(1); /* uxsock_cleanup */
out:
	pthread_cleanup_pop(1); /* rcu_unregister */
	return NULL;
}
1624
/* Request daemon shutdown by posting the SHUTDOWN state to all threads. */
void
exit_daemon (void)
{
	post_config_state(DAEMON_SHUTDOWN);
}
1630
1631 static void
1632 fail_path (struct path * pp, int del_active)
1633 {
1634         if (!pp->mpp)
1635                 return;
1636
1637         condlog(2, "checker failed path %s in map %s",
1638                  pp->dev_t, pp->mpp->alias);
1639
1640         dm_fail_path(pp->mpp->alias, pp->dev_t);
1641         if (del_active)
1642                 update_queue_mode_del_path(pp->mpp);
1643 }
1644
1645 /*
1646  * caller must have locked the path list before calling that function
1647  */
1648 static int
1649 reinstate_path (struct path * pp, int add_active)
1650 {
1651         int ret = 0;
1652
1653         if (!pp->mpp)
1654                 return 0;
1655
1656         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1657                 condlog(0, "%s: reinstate failed", pp->dev_t);
1658                 ret = 1;
1659         } else {
1660                 condlog(2, "%s: reinstated", pp->dev_t);
1661                 if (add_active)
1662                         update_queue_mode_add_path(pp->mpp);
1663         }
1664         return ret;
1665 }
1666
1667 static void
1668 enable_group(struct path * pp)
1669 {
1670         struct pathgroup * pgp;
1671
1672         /*
1673          * if path is added through uev_add_path, pgindex can be unset.
1674          * next update_strings() will set it, upon map reload event.
1675          *
1676          * we can safely return here, because upon map reload, all
1677          * PG will be enabled.
1678          */
1679         if (!pp->mpp->pg || !pp->pgindex)
1680                 return;
1681
1682         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1683
1684         if (pgp->status == PGSTATE_DISABLED) {
1685                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1686                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1687         }
1688 }
1689
1690 static void
1691 mpvec_garbage_collector (struct vectors * vecs)
1692 {
1693         struct multipath * mpp;
1694         unsigned int i;
1695
1696         if (!vecs->mpvec)
1697                 return;
1698
1699         vector_foreach_slot (vecs->mpvec, mpp, i) {
1700                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1701                         condlog(2, "%s: remove dead map", mpp->alias);
1702                         remove_map_and_stop_waiter(mpp, vecs);
1703                         i--;
1704                 }
1705         }
1706 }
1707
1708 /* This is called after a path has started working again. It the multipath
1709  * device for this path uses the followover failback type, and this is the
1710  * best pathgroup, and this is the first path in the pathgroup to come back
1711  * up, then switch to this pathgroup */
1712 static int
1713 followover_should_failback(struct path * pp)
1714 {
1715         struct pathgroup * pgp;
1716         struct path *pp1;
1717         int i;
1718
1719         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1720             !pp->mpp->pg || !pp->pgindex ||
1721             pp->pgindex != pp->mpp->bestpg)
1722                 return 0;
1723
1724         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1725         vector_foreach_slot(pgp->paths, pp1, i) {
1726                 if (pp1 == pp)
1727                         continue;
1728                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1729                         return 0;
1730         }
1731         return 1;
1732 }
1733
/*
 * Count down the per-map uevent wait timers. When a map has waited
 * too long for its creation uevent, stop waiting (re-enabling
 * reloads) and, if delayed_reconfig is configured, kick off the
 * postponed reconfigure.
 */
static void
missing_uev_wait_tick(struct vectors *vecs)
{
	struct multipath * mpp;
	unsigned int i;
	int timed_out = 0, delayed_reconfig;
	struct config *conf;

	vector_foreach_slot (vecs->mpvec, mpp, i) {
		if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
			timed_out = 1;
			condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
			/* wait_for_udev > 1 presumably means an update was
			 * deferred while waiting — TODO confirm against the
			 * setters of wait_for_udev */
			if (mpp->wait_for_udev > 1 &&
			    update_map(mpp, vecs, 0)) {
				/* update_map removed map */
				i--;
				continue;
			}
			mpp->wait_for_udev = 0;
		}
	}

	conf = get_multipath_config();
	delayed_reconfig = conf->delayed_reconfig;
	put_multipath_config(conf);
	/* run the reconfigure that was delayed while maps were waiting */
	if (timed_out && delayed_reconfig &&
	    !need_to_delay_reconfig(vecs)) {
		condlog(2, "reconfigure (delayed)");
		set_config_state(DAEMON_CONFIGURE);
	}
}
1765
1766 static void
1767 ghost_delay_tick(struct vectors *vecs)
1768 {
1769         struct multipath * mpp;
1770         unsigned int i;
1771
1772         vector_foreach_slot (vecs->mpvec, mpp, i) {
1773                 if (mpp->ghost_delay_tick <= 0)
1774                         continue;
1775                 if (--mpp->ghost_delay_tick <= 0) {
1776                         condlog(0, "%s: timed out waiting for active path",
1777                                 mpp->alias);
1778                         mpp->force_udev_reload = 1;
1779                         if (update_map(mpp, vecs, 0) != 0) {
1780                                 /* update_map removed map */
1781                                 i--;
1782                                 continue;
1783                         }
1784                 }
1785         }
1786 }
1787
1788 static void
1789 defered_failback_tick (vector mpvec)
1790 {
1791         struct multipath * mpp;
1792         unsigned int i;
1793
1794         vector_foreach_slot (mpvec, mpp, i) {
1795                 /*
1796                  * deferred failback getting sooner
1797                  */
1798                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1799                         mpp->failback_tick--;
1800
1801                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1802                                 switch_pathgroup(mpp);
1803                 }
1804         }
1805 }
1806
1807 static void
1808 retry_count_tick(vector mpvec)
1809 {
1810         struct multipath *mpp;
1811         unsigned int i;
1812
1813         vector_foreach_slot (mpvec, mpp, i) {
1814                 if (mpp->retry_tick > 0) {
1815                         mpp->stat_total_queueing_time++;
1816                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1817                         if(--mpp->retry_tick == 0) {
1818                                 mpp->stat_map_failures++;
1819                                 dm_queue_if_no_path(mpp->alias, 0);
1820                                 condlog(2, "%s: Disable queueing", mpp->alias);
1821                         }
1822                 }
1823         }
1824 }
1825
1826 int update_prio(struct path *pp, int refresh_all)
1827 {
1828         int oldpriority;
1829         struct path *pp1;
1830         struct pathgroup * pgp;
1831         int i, j, changed = 0;
1832         struct config *conf;
1833
1834         if (refresh_all) {
1835                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1836                         vector_foreach_slot (pgp->paths, pp1, j) {
1837                                 oldpriority = pp1->priority;
1838                                 conf = get_multipath_config();
1839                                 pthread_cleanup_push(put_multipath_config,
1840                                                      conf);
1841                                 pathinfo(pp1, conf, DI_PRIO);
1842                                 pthread_cleanup_pop(1);
1843                                 if (pp1->priority != oldpriority)
1844                                         changed = 1;
1845                         }
1846                 }
1847                 return changed;
1848         }
1849         oldpriority = pp->priority;
1850         conf = get_multipath_config();
1851         pthread_cleanup_push(put_multipath_config, conf);
1852         if (pp->state != PATH_DOWN)
1853                 pathinfo(pp, conf, DI_PRIO);
1854         pthread_cleanup_pop(1);
1855
1856         if (pp->priority == oldpriority)
1857                 return 0;
1858         return 1;
1859 }
1860
/*
 * Reload the map's kernel table and re-sync daemon state with it.
 * Returns 0 on success, 1 if the reload failed, 2 if the subsequent
 * setup_multipath() failed.
 */
int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
{
	int rc = reload_map(vecs, mpp, refresh, 1);

	if (rc != 0)
		return 1;
	dm_lib_release();
	rc = setup_multipath(vecs, mpp);
	if (rc != 0)
		return 2;
	sync_map_state(mpp);
	return 0;
}
1873
/*
 * Implements the san_path_err_* "shaky path" handling: a path whose
 * failure count exceeds san_path_err_threshold is held in delayed
 * state until san_path_err_recovery_time has elapsed.
 * Returns 1 if the path should (still) be delayed, 0 if it may be
 * reinstated normally.
 */
static int check_path_reinstate_state(struct path * pp) {
	struct timespec curr_time;

	/*
	 * This function is only called when the path state changes
	 * from "bad" to "good". pp->state reflects the *previous* state.
	 * If this was "bad", we know that a failure must have occurred
	 * beforehand, and count that.
	 * Note that we count path state _changes_ this way. If a path
	 * remains in "bad" state, failure count is not increased.
	 */

	/* the feature is only active if all three tunables are set */
	if (!((pp->mpp->san_path_err_threshold > 0) &&
				(pp->mpp->san_path_err_forget_rate > 0) &&
				(pp->mpp->san_path_err_recovery_time >0))) {
		return 0;
	}

	if (pp->disable_reinstate) {
		/* If there are no other usable paths, reinstate the path */
		if (pp->mpp->nr_active == 0) {
			condlog(2, "%s : reinstating path early", pp->dev);
			goto reinstate_path;
		}
		get_monotonic_time(&curr_time);

		/* If path became failed again or continue failed, should reset
		 * path san_path_err_forget_rate and path dis_reinstate_time to
		 * start a new stable check.
		 */
		if ((pp->state != PATH_UP) && (pp->state != PATH_GHOST) &&
			(pp->state != PATH_DELAYED)) {
			pp->san_path_err_forget_rate =
				pp->mpp->san_path_err_forget_rate;
			pp->dis_reinstate_time = curr_time.tv_sec;
		}

		if ((curr_time.tv_sec - pp->dis_reinstate_time ) > pp->mpp->san_path_err_recovery_time) {
			condlog(2,"%s : reinstate the path after err recovery time", pp->dev);
			goto reinstate_path;
		}
		/* still inside the recovery window: keep the path delayed */
		return 1;
	}
	/* forget errors on a working path */
	if ((pp->state == PATH_UP || pp->state == PATH_GHOST) &&
			pp->path_failures > 0) {
		if (pp->san_path_err_forget_rate > 0){
			pp->san_path_err_forget_rate--;
		} else {
			/* for every san_path_err_forget_rate number of
			 * successful path checks decrement path_failures by 1
			 */
			pp->path_failures--;
			pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
		}
		return 0;
	}

	/* If the path isn't recovering from a failed state, do nothing */
	if (pp->state != PATH_DOWN && pp->state != PATH_SHAKY &&
			pp->state != PATH_TIMEOUT)
		return 0;

	/* first failure in a while: restart the forget-rate countdown */
	if (pp->path_failures == 0)
		pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;

	pp->path_failures++;

	/* if we don't know the currently time, we don't know how long to
	 * delay the path, so there's no point in checking if we should
	 */

	get_monotonic_time(&curr_time);
	/* when path failures has exceeded the san_path_err_threshold
	 * place the path in delayed state till san_path_err_recovery_time
	 * so that the customer can rectify the issue within this time. After
	 * the completion of san_path_err_recovery_time it should
	 * automatically reinstate the path
	 * (note: we know that san_path_err_threshold > 0 here).
	 */
	if (pp->path_failures > (unsigned int)pp->mpp->san_path_err_threshold) {
		condlog(2, "%s : hit error threshold. Delaying path reinstatement", pp->dev);
		pp->dis_reinstate_time = curr_time.tv_sec;
		pp->disable_reinstate = 1;

		return 1;
	} else {
		return 0;
	}

reinstate_path:
	/* clear all shaky-path bookkeeping and allow reinstatement */
	pp->path_failures = 0;
	pp->disable_reinstate = 0;
	pp->san_path_err_forget_rate = 0;
	return 0;
}
1970
1971 static int
1972 should_skip_path(struct path *pp){
1973         if (marginal_path_check_enabled(pp->mpp)) {
1974                 if (pp->io_err_disable_reinstate && need_io_err_check(pp))
1975                         return 1;
1976         } else if (san_path_check_enabled(pp->mpp)) {
1977                 if (check_path_reinstate_state(pp))
1978                         return 1;
1979         }
1980         return 0;
1981 }
1982
1983 /*
1984  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1985  * and '0' otherwise
1986  */
1987 int
1988 check_path (struct vectors * vecs, struct path * pp, unsigned int ticks)
1989 {
1990         int newstate;
1991         int new_path_up = 0;
1992         int chkr_new_path_up = 0;
1993         int add_active;
1994         int disable_reinstate = 0;
1995         int oldchkrstate = pp->chkrstate;
1996         int retrigger_tries, verbosity;
1997         unsigned int checkint, max_checkint;
1998         struct config *conf;
1999         int marginal_pathgroups, marginal_changed = 0;
2000         int ret;
2001
2002         if ((pp->initialized == INIT_OK ||
2003              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
2004                 return 0;
2005
2006         if (pp->tick)
2007                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
2008         if (pp->tick)
2009                 return 0; /* don't check this path yet */
2010
2011         conf = get_multipath_config();
2012         retrigger_tries = conf->retrigger_tries;
2013         checkint = conf->checkint;
2014         max_checkint = conf->max_checkint;
2015         verbosity = conf->verbosity;
2016         marginal_pathgroups = conf->marginal_pathgroups;
2017         put_multipath_config(conf);
2018
2019         if (pp->checkint == CHECKINT_UNDEF) {
2020                 condlog(0, "%s: BUG: checkint is not set", pp->dev);
2021                 pp->checkint = checkint;
2022         };
2023
2024         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
2025                 if (pp->retriggers < retrigger_tries) {
2026                         condlog(2, "%s: triggering change event to reinitialize",
2027                                 pp->dev);
2028                         pp->initialized = INIT_REQUESTED_UDEV;
2029                         pp->retriggers++;
2030                         sysfs_attr_set_value(pp->udev, "uevent", "change",
2031                                              strlen("change"));
2032                         return 0;
2033                 } else {
2034                         condlog(1, "%s: not initialized after %d udev retriggers",
2035                                 pp->dev, retrigger_tries);
2036                         /*
2037                          * Make sure that the "add missing path" code path
2038                          * below may reinstate the path later, if it ever
2039                          * comes up again.
2040                          * The WWID needs not be cleared; if it was set, the
2041                          * state hadn't been INIT_MISSING_UDEV in the first
2042                          * place.
2043                          */
2044                         pp->initialized = INIT_FAILED;
2045                         return 0;
2046                 }
2047         }
2048
2049         /*
2050          * provision a next check soonest,
2051          * in case we exit abnormaly from here
2052          */
2053         pp->tick = checkint;
2054
2055         newstate = path_offline(pp);
2056         if (newstate == PATH_UP) {
2057                 conf = get_multipath_config();
2058                 pthread_cleanup_push(put_multipath_config, conf);
2059                 newstate = get_state(pp, conf, 1, newstate);
2060                 pthread_cleanup_pop(1);
2061         } else {
2062                 checker_clear_message(&pp->checker);
2063                 condlog(3, "%s: state %s, checker not called",
2064                         pp->dev, checker_state_name(newstate));
2065         }
2066         /*
2067          * Wait for uevent for removed paths;
2068          * some LLDDs like zfcp keep paths unavailable
2069          * without sending uevents.
2070          */
2071         if (newstate == PATH_REMOVED)
2072                 newstate = PATH_DOWN;
2073
2074         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
2075                 condlog(2, "%s: unusable path (%s) - checker failed",
2076                         pp->dev, checker_state_name(newstate));
2077                 LOG_MSG(2, verbosity, pp);
2078                 conf = get_multipath_config();
2079                 pthread_cleanup_push(put_multipath_config, conf);
2080                 pathinfo(pp, conf, 0);
2081                 pthread_cleanup_pop(1);
2082                 return 1;
2083         } else if ((newstate != PATH_UP && newstate != PATH_GHOST) &&
2084                         (pp->state == PATH_DELAYED)) {
2085                 /* If path state become failed again cancel path delay state */
2086                 pp->state = newstate;
2087                 return 1;
2088         }
2089         if (!pp->mpp) {
2090                 if (!strlen(pp->wwid) &&
2091                     (pp->initialized == INIT_FAILED ||
2092                      pp->initialized == INIT_NEW) &&
2093                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
2094                         condlog(2, "%s: add missing path", pp->dev);
2095                         conf = get_multipath_config();
2096                         pthread_cleanup_push(put_multipath_config, conf);
2097                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
2098                         pthread_cleanup_pop(1);
2099                         /* INIT_OK implies ret == PATHINFO_OK */
2100                         if (pp->initialized == INIT_OK) {
2101                                 ev_add_path(pp, vecs, 1);
2102                                 pp->tick = 1;
2103                         } else {
2104                                 /*
2105                                  * We failed multiple times to initialize this
2106                                  * path properly. Don't re-check too often.
2107                                  */
2108                                 pp->checkint = max_checkint;
2109                                 if (ret == PATHINFO_SKIPPED)
2110                                         return -1;
2111                         }
2112                 }
2113                 return 0;
2114         }
2115         /*
2116          * Async IO in flight. Keep the previous path state
2117          * and reschedule as soon as possible
2118          */
2119         if (newstate == PATH_PENDING) {
2120                 pp->tick = 1;
2121                 return 0;
2122         }
2123         /*
2124          * Synchronize with kernel state
2125          */
2126         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
2127                 condlog(1, "%s: Could not synchronize with kernel state",
2128                         pp->dev);
2129                 pp->dmstate = PSTATE_UNDEF;
2130         }
2131         /* if update_multipath_strings orphaned the path, quit early */
2132         if (!pp->mpp)
2133                 return 0;
2134         set_no_path_retry(pp->mpp);
2135
2136         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
2137             (san_path_check_enabled(pp->mpp) ||
2138              marginal_path_check_enabled(pp->mpp))) {
2139                 int was_marginal = pp->marginal;
2140                 if (should_skip_path(pp)) {
2141                         if (!marginal_pathgroups) {
2142                                 if (marginal_path_check_enabled(pp->mpp))
2143                                         /* to reschedule as soon as possible,
2144                                          * so that this path can be recovered
2145                                          * in time */
2146                                         pp->tick = 1;
2147                                 pp->state = PATH_DELAYED;
2148                                 return 1;
2149                         }
2150                         if (!was_marginal) {
2151                                 pp->marginal = 1;
2152                                 marginal_changed = 1;
2153                         }
2154                 } else if (marginal_pathgroups && was_marginal) {
2155                         pp->marginal = 0;
2156                         marginal_changed = 1;
2157                 }
2158         }
2159
2160         /*
2161          * don't reinstate failed path, if its in stand-by
2162          * and if target supports only implicit tpgs mode.
2163          * this will prevent unnecessary i/o by dm on stand-by
2164          * paths if there are no other active paths in map.
2165          */
2166         disable_reinstate = (newstate == PATH_GHOST &&
2167                              pp->mpp->nr_active == 0 &&
2168                              path_get_tpgs(pp) == TPGS_IMPLICIT) ? 1 : 0;
2169
2170         pp->chkrstate = newstate;
2171         if (newstate != pp->state) {
2172                 int oldstate = pp->state;
2173                 pp->state = newstate;
2174
2175                 LOG_MSG(1, verbosity, pp);
2176
2177                 /*
2178                  * upon state change, reset the checkint
2179                  * to the shortest delay
2180                  */
2181                 conf = get_multipath_config();
2182                 pp->checkint = conf->checkint;
2183                 put_multipath_config(conf);
2184
2185                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2186                         /*
2187                          * proactively fail path in the DM
2188                          */
2189                         if (oldstate == PATH_UP ||
2190                             oldstate == PATH_GHOST)
2191                                 fail_path(pp, 1);
2192                         else
2193                                 fail_path(pp, 0);
2194
2195                         /*
2196                          * cancel scheduled failback
2197                          */
2198                         pp->mpp->failback_tick = 0;
2199
2200                         pp->mpp->stat_path_failures++;
2201                         return 1;
2202                 }
2203
2204                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2205                         if (pp->mpp->prflag) {
2206                                 /*
2207                                  * Check Persistent Reservation.
2208                                  */
2209                                 condlog(2, "%s: checking persistent "
2210                                         "reservation registration", pp->dev);
2211                                 mpath_pr_event_handle(pp);
2212                         }
2213                 }
2214
2215                 /*
2216                  * reinstate this path
2217                  */
2218                 if (oldstate != PATH_UP &&
2219                     oldstate != PATH_GHOST)
2220                         add_active = 1;
2221                 else
2222                         add_active = 0;
2223                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2224                         condlog(3, "%s: reload map", pp->dev);
2225                         ev_add_path(pp, vecs, 1);
2226                         pp->tick = 1;
2227                         return 0;
2228                 }
2229                 new_path_up = 1;
2230
2231                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2232                         chkr_new_path_up = 1;
2233
2234                 /*
2235                  * if at least one path is up in a group, and
2236                  * the group is disabled, re-enable it
2237                  */
2238                 if (newstate == PATH_UP)
2239                         enable_group(pp);
2240         }
2241         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2242                 if ((pp->dmstate == PSTATE_FAILED ||
2243                     pp->dmstate == PSTATE_UNDEF) &&
2244                     !disable_reinstate) {
2245                         /* Clear IO errors */
2246                         if (reinstate_path(pp, 0)) {
2247                                 condlog(3, "%s: reload map", pp->dev);
2248                                 ev_add_path(pp, vecs, 1);
2249                                 pp->tick = 1;
2250                                 return 0;
2251                         }
2252                 } else {
2253                         LOG_MSG(4, verbosity, pp);
2254                         if (pp->checkint != max_checkint) {
2255                                 /*
2256                                  * double the next check delay.
2257                                  * max at conf->max_checkint
2258                                  */
2259                                 if (pp->checkint < (max_checkint / 2))
2260                                         pp->checkint = 2 * pp->checkint;
2261                                 else
2262                                         pp->checkint = max_checkint;
2263
2264                                 condlog(4, "%s: delay next check %is",
2265                                         pp->dev_t, pp->checkint);
2266                         }
2267                         pp->tick = pp->checkint;
2268                 }
2269         }
2270         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2271                 if (pp->dmstate == PSTATE_ACTIVE ||
2272                     pp->dmstate == PSTATE_UNDEF)
2273                         fail_path(pp, 0);
2274                 if (newstate == PATH_DOWN) {
2275                         int log_checker_err;
2276
2277                         conf = get_multipath_config();
2278                         log_checker_err = conf->log_checker_err;
2279                         put_multipath_config(conf);
2280                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2281                                 LOG_MSG(3, verbosity, pp);
2282                         else
2283                                 LOG_MSG(2, verbosity, pp);
2284                 }
2285         }
2286
2287         pp->state = newstate;
2288
2289         if (pp->mpp->wait_for_udev)
2290                 return 1;
2291         /*
2292          * path prio refreshing
2293          */
2294         condlog(4, "path prio refresh");
2295
2296         if (marginal_changed)
2297                 update_path_groups(pp->mpp, vecs, 1);
2298         else if (update_prio(pp, new_path_up) &&
2299             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2300              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2301                 update_path_groups(pp->mpp, vecs, !new_path_up);
2302         else if (need_switch_pathgroup(pp->mpp, 0)) {
2303                 if (pp->mpp->pgfailback > 0 &&
2304                     (new_path_up || pp->mpp->failback_tick <= 0))
2305                         pp->mpp->failback_tick =
2306                                 pp->mpp->pgfailback + 1;
2307                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2308                          (chkr_new_path_up && followover_should_failback(pp)))
2309                         switch_pathgroup(pp->mpp);
2310         }
2311         return 1;
2312 }
2313
/*
 * Main path-checker thread: roughly once per second, check every
 * path, run the per-map timer ticks, garbage-collect dead maps, and
 * (when enabled) pet the systemd watchdog.
 */
static void *
checkerloop (void *ap)
{
	struct vectors *vecs;
	struct path *pp;
	int count = 0;
	unsigned int i;
	struct timespec last_time;
	struct config *conf;
	int foreign_tick = 0;

	pthread_cleanup_push(rcu_unregister, NULL);
	rcu_register_thread();
	mlockall(MCL_CURRENT | MCL_FUTURE);
	vecs = (struct vectors *)ap;
	condlog(2, "path checkers start up");

	/* Tweak start time for initial path check */
	get_monotonic_time(&last_time);
	last_time.tv_sec -= 1;

	while (1) {
		struct timespec diff_time, start_time, end_time;
		int num_paths = 0, strict_timing, rc = 0;
		unsigned int ticks = 0;

		/* whole seconds elapsed since the previous iteration */
		get_monotonic_time(&start_time);
		if (start_time.tv_sec && last_time.tv_sec) {
			timespecsub(&start_time, &last_time, &diff_time);
			condlog(4, "tick (%lu.%06lu secs)",
				diff_time.tv_sec, diff_time.tv_nsec / 1000);
			last_time = start_time;
			ticks = diff_time.tv_sec;
		} else {
			ticks = 1;
			condlog(4, "tick (%d ticks)", ticks);
		}
#ifdef USE_SYSTEMD
		if (use_watchdog)
			sd_notify(0, "WATCHDOG=1");
#endif
		rc = set_config_state(DAEMON_RUNNING);
		if (rc == ETIMEDOUT) {
			condlog(4, "timeout waiting for DAEMON_IDLE");
			continue;
		} else if (rc == EINVAL)
			/* daemon shutdown */
			break;

		/* check all paths; a negative rc means the path was
		 * blacklisted and must be dropped from the pathvec */
		pthread_cleanup_push(cleanup_lock, &vecs->lock);
		lock(&vecs->lock);
		pthread_testcancel();
		vector_foreach_slot (vecs->pathvec, pp, i) {
			rc = check_path(vecs, pp, ticks);
			if (rc < 0) {
				vector_del_slot(vecs->pathvec, i);
				free_path(pp);
				i--;
			} else
				num_paths += rc;
		}
		lock_cleanup_pop(vecs->lock);

		/* run the per-map countdown timers */
		pthread_cleanup_push(cleanup_lock, &vecs->lock);
		lock(&vecs->lock);
		pthread_testcancel();
		defered_failback_tick(vecs->mpvec);
		retry_count_tick(vecs->mpvec);
		missing_uev_wait_tick(vecs);
		ghost_delay_tick(vecs);
		lock_cleanup_pop(vecs->lock);

		/* garbage-collect dead maps every MAPGCINT iterations */
		if (count)
			count--;
		else {
			pthread_cleanup_push(cleanup_lock, &vecs->lock);
			lock(&vecs->lock);
			pthread_testcancel();
			condlog(4, "map garbage collection");
			mpvec_garbage_collector(vecs);
			count = MAPGCINT;
			lock_cleanup_pop(vecs->lock);
		}

		/* report how long the checks took; warn if they exceed
		 * max_checkint */
		diff_time.tv_nsec = 0;
		if (start_time.tv_sec) {
			get_monotonic_time(&end_time);
			timespecsub(&end_time, &start_time, &diff_time);
			if (num_paths) {
				unsigned int max_checkint;

				condlog(4, "checked %d path%s in %lu.%06lu secs",
					num_paths, num_paths > 1 ? "s" : "",
					diff_time.tv_sec,
					diff_time.tv_nsec / 1000);
				conf = get_multipath_config();
				max_checkint = conf->max_checkint;
				put_multipath_config(conf);
				if (diff_time.tv_sec > max_checkint)
					condlog(1, "path checkers took longer "
						"than %lu seconds, consider "
						"increasing max_polling_interval",
						diff_time.tv_sec);
			}
		}

		/* check foreign (non-dm-multipath) devices every
		 * max_checkint iterations */
		if (foreign_tick == 0) {
			conf = get_multipath_config();
			foreign_tick = conf->max_checkint;
			put_multipath_config(conf);
		}
		if (--foreign_tick == 0)
			check_foreign();

		post_config_state(DAEMON_IDLE);
		conf = get_multipath_config();
		strict_timing = conf->strict_timing;
		put_multipath_config(conf);
		if (!strict_timing)
			sleep(1);
		else {
			/* strict timing: sleep exactly the remainder of
			 * the one-second interval */
			if (diff_time.tv_nsec) {
				diff_time.tv_sec = 0;
				diff_time.tv_nsec =
				     1000UL * 1000 * 1000 - diff_time.tv_nsec;
			} else
				diff_time.tv_sec = 1;

			condlog(3, "waiting for %lu.%06lu secs",
				diff_time.tv_sec,
				diff_time.tv_nsec / 1000);
			if (nanosleep(&diff_time, NULL) != 0) {
				condlog(3, "nanosleep failed with error %d",
					errno);
				conf = get_multipath_config();
				conf->strict_timing = 0;
				put_multipath_config(conf);
				break;
			}
		}
	}
	pthread_cleanup_pop(1);
	return NULL;
}
2458
/*
 * Perform a full (re)build of the multipath state: discover paths from
 * sysfs and maps from device-mapper, coalesce paths into multipath
 * devices, push changed maps into the kernel, and start the per-map
 * event waiter threads.
 *
 * Returns 0 on success, 1 on failure.  On failure the partially built
 * map vector is freed; vecs->pathvec / vecs->mpvec keep whatever state
 * was reached before the failure.
 */
int
configure (struct vectors * vecs)
{
	struct multipath * mpp;
	struct path * pp;
	vector mpvec;
	int i, ret;
	struct config *conf;
	/* Only the very first call uses FORCE_RELOAD_WEAK; see below. */
	static int force_reload = FORCE_RELOAD_WEAK;

	if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
		condlog(0, "couldn't allocate path vec in configure");
		return 1;
	}

	if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
		condlog(0, "couldn't allocate multipath vec in configure");
		return 1;
	}

	if (!(mpvec = vector_alloc())) {
		condlog(0, "couldn't allocate new maps vec in configure");
		return 1;
	}

	/*
	 * probe for current path (from sysfs) and map (from dm) sets
	 */
	ret = path_discovery(vecs->pathvec, DI_ALL);
	if (ret < 0) {
		condlog(0, "configure failed at path discovery");
		goto fail;
	}

	/*
	 * Drop paths excluded by the configuration (blacklist etc.).
	 * The cleanup handler releases the config reference even if the
	 * thread is cancelled inside the loop.
	 */
	conf = get_multipath_config();
	pthread_cleanup_push(put_multipath_config, conf);
	vector_foreach_slot (vecs->pathvec, pp, i){
		if (filter_path(conf, pp) > 0){
			vector_del_slot(vecs->pathvec, i);
			free_path(pp);
			i--;	/* compensate for the deleted slot */
		}
	}
	pthread_cleanup_pop(1);

	if (map_discovery(vecs)) {
		condlog(0, "configure failed at map discovery");
		goto fail;
	}

	/*
	 * create new set of maps & push changed ones into dm
	 * In the first call, use FORCE_RELOAD_WEAK to avoid making
	 * superfluous ACT_RELOAD ioctls. Later calls are done
	 * with FORCE_RELOAD_YES.
	 */
	ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
	if (force_reload == FORCE_RELOAD_WEAK)
		force_reload = FORCE_RELOAD_YES;
	if (ret != CP_OK) {
		condlog(0, "configure failed while coalescing paths");
		goto fail;
	}

	/*
	 * may need to remove some maps which are no longer relevant
	 * e.g., due to blacklist changes in conf file
	 */
	if (coalesce_maps(vecs, mpvec)) {
		condlog(0, "configure failed while coalescing maps");
		goto fail;
	}

	dm_lib_release();

	sync_maps_state(mpvec);
	vector_foreach_slot(mpvec, mpp, i){
		/* remember_wwid() == 1 means this wwid was newly recorded */
		if (remember_wwid(mpp->wwid) == 1)
			trigger_paths_udev_change(mpp, true);
		update_map_pr(mpp);
	}

	/*
	 * purge dm of old maps
	 */
	remove_maps(vecs);

	/*
	 * save new set of maps formed by considering current path state
	 */
	vector_free(vecs->mpvec);
	vecs->mpvec = mpvec;

	/*
	 * start dm event waiter threads for these new maps
	 */
	vector_foreach_slot(vecs->mpvec, mpp, i) {
		if (wait_for_events(mpp, vecs)) {
			/* can't monitor this map: drop it, fix the index */
			remove_map(mpp, vecs, 1);
			i--;
			continue;
		}
		if (setup_multipath(vecs, mpp))
			i--;	/* setup_multipath removed the map on error */
	}
	return 0;

fail:
	vector_free(mpvec);
	return 1;
}
2570
2571 int
2572 need_to_delay_reconfig(struct vectors * vecs)
2573 {
2574         struct multipath *mpp;
2575         int i;
2576
2577         if (!VECTOR_SIZE(vecs->mpvec))
2578                 return 0;
2579
2580         vector_foreach_slot(vecs->mpvec, mpp, i) {
2581                 if (mpp->wait_for_udev)
2582                         return 1;
2583         }
2584         return 0;
2585 }
2586
/*
 * RCU callback: free a struct config once all readers have finished
 * with it.  Scheduled via call_rcu() from reconfigure() and child().
 */
void rcu_free_config(struct rcu_head *head)
{
	struct config *conf = container_of(head, struct config, rcu);

	free_config(conf);
}
2593
2594 int
2595 reconfigure (struct vectors * vecs)
2596 {
2597         struct config * old, *conf;
2598
2599         conf = load_config(DEFAULT_CONFIGFILE);
2600         if (!conf)
2601                 return 1;
2602
2603         /*
2604          * free old map and path vectors ... they use old conf state
2605          */
2606         if (VECTOR_SIZE(vecs->mpvec))
2607                 remove_maps_and_stop_waiters(vecs);
2608
2609         free_pathvec(vecs->pathvec, FREE_PATHS);
2610         vecs->pathvec = NULL;
2611         delete_all_foreign();
2612
2613         reset_checker_classes();
2614         /* Re-read any timezone changes */
2615         tzset();
2616
2617         dm_tgt_version(conf->version, TGT_MPATH);
2618         if (verbosity)
2619                 conf->verbosity = verbosity;
2620         if (bindings_read_only)
2621                 conf->bindings_read_only = bindings_read_only;
2622         uxsock_timeout = conf->uxsock_timeout;
2623
2624         old = rcu_dereference(multipath_conf);
2625         conf->sequence_nr = old->sequence_nr + 1;
2626         rcu_assign_pointer(multipath_conf, conf);
2627         call_rcu(&old->rcu, rcu_free_config);
2628
2629         configure(vecs);
2630
2631
2632         return 0;
2633 }
2634
2635 static struct vectors *
2636 init_vecs (void)
2637 {
2638         struct vectors * vecs;
2639
2640         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2641
2642         if (!vecs)
2643                 return NULL;
2644
2645         pthread_mutex_init(&vecs->lock.mutex, NULL);
2646
2647         return vecs;
2648 }
2649
2650 static void *
2651 signal_set(int signo, void (*func) (int))
2652 {
2653         int r;
2654         struct sigaction sig;
2655         struct sigaction osig;
2656
2657         sig.sa_handler = func;
2658         sigemptyset(&sig.sa_mask);
2659         sig.sa_flags = 0;
2660
2661         r = sigaction(signo, &sig, &osig);
2662
2663         if (r < 0)
2664                 return (SIG_ERR);
2665         else
2666                 return (osig.sa_handler);
2667 }
2668
2669 void
2670 handle_signals(bool nonfatal)
2671 {
2672         if (exit_sig) {
2673                 condlog(2, "exit (signal)");
2674                 exit_sig = 0;
2675                 exit_daemon();
2676         }
2677         if (!nonfatal)
2678                 return;
2679         if (reconfig_sig) {
2680                 condlog(2, "reconfigure (signal)");
2681                 set_config_state(DAEMON_CONFIGURE);
2682         }
2683         if (log_reset_sig) {
2684                 condlog(2, "reset log (signal)");
2685                 if (logsink == 1)
2686                         log_thread_reset();
2687         }
2688         reconfig_sig = 0;
2689         log_reset_sig = 0;
2690 }
2691
/* SIGHUP handler: request a reconfiguration (serviced later by
 * handle_signals(), outside async-signal context). */
static void
sighup(__attribute__((unused)) int sig)
{
	reconfig_sig = 1;
}
2697
/* SIGINT/SIGTERM/SIGPIPE handler: request daemon shutdown. */
static void
sigend(__attribute__((unused)) int sig)
{
	exit_sig = 1;
}
2703
/* SIGUSR1 handler: request a log-thread reset. */
static void
sigusr1(__attribute__((unused)) int sig)
{
	log_reset_sig = 1;
}
2709
/* SIGUSR2 handler: no action, just log receipt (can be used to
 * interrupt blocking syscalls in worker threads). */
static void
sigusr2(__attribute__((unused)) int sig)
{
	condlog(3, "SIGUSR2 received");
}
2715
2716 static void
2717 signal_init(void)
2718 {
2719         sigset_t set;
2720
2721         /* block all signals */
2722         sigfillset(&set);
2723         /* SIGPIPE occurs if logging fails */
2724         sigdelset(&set, SIGPIPE);
2725         pthread_sigmask(SIG_SETMASK, &set, NULL);
2726
2727         /* Other signals will be unblocked in the uxlsnr thread */
2728         signal_set(SIGHUP, sighup);
2729         signal_set(SIGUSR1, sigusr1);
2730         signal_set(SIGUSR2, sigusr2);
2731         signal_set(SIGINT, sigend);
2732         signal_set(SIGTERM, sigend);
2733         signal_set(SIGPIPE, sigend);
2734 }
2735
/*
 * Try to switch the daemon to the SCHED_RR real-time scheduling class
 * at priority 99, so path checking keeps running under load.  Failure
 * is logged but not fatal.
 */
static void
setscheduler (void)
{
	static struct sched_param sched_param = {
		.sched_priority = 99
	};

	if (sched_setscheduler(0, SCHED_RR, &sched_param) == -1)
		condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
}
2750
/*
 * Make the daemon as unattractive as possible to the OOM killer by
 * writing the minimum score to /proc/self/oom_score_adj (falling back
 * to the legacy /proc/self/oom_adj on older kernels).  If systemd
 * already set OOMScoreAdjust, do nothing.
 *
 * Fixes: the file name pointers now carry const (they point at string
 * literals), and fprintf()/fclose() results are checked so a failed
 * write to /proc no longer goes unreported.
 */
static void
set_oom_adj (void)
{
#ifdef OOM_SCORE_ADJ_MIN
	int retry = 1;
	const char *file = "/proc/self/oom_score_adj";
	int score = OOM_SCORE_ADJ_MIN;
#else
	int retry = 0;
	const char *file = "/proc/self/oom_adj";
	int score = OOM_ADJUST_MIN;
#endif
	FILE *fp;
	struct stat st;
	char *envp;

	envp = getenv("OOMScoreAdjust");
	if (envp) {
		condlog(3, "Using systemd provided OOMScoreAdjust");
		return;
	}
	do {
		if (stat(file, &st) == 0){
			fp = fopen(file, "w");
			if (!fp) {
				condlog(0, "couldn't fopen %s : %s", file,
					strerror(errno));
				return;
			}
			/* report (but don't retry) a failed write */
			if (fprintf(fp, "%i", score) < 0)
				condlog(0, "couldn't write to %s : %s", file,
					strerror(errno));
			if (fclose(fp) != 0)
				condlog(0, "couldn't close %s : %s", file,
					strerror(errno));
			return;
		}
		if (errno != ENOENT) {
			condlog(0, "couldn't stat %s : %s", file,
				strerror(errno));
			return;
		}
#ifdef OOM_ADJUST_MIN
		/* modern file absent: fall back to the legacy interface */
		file = "/proc/self/oom_adj";
		score = OOM_ADJUST_MIN;
#else
		retry = 0;
#endif
	} while (retry--);
	condlog(0, "couldn't adjust oom score");
}
2798
/*
 * Daemon main routine, run after (optional) daemonization.
 *
 * Initializes logging, loads the configuration, sets up checkers,
 * prioritizers and foreign libraries, spawns the worker threads
 * (uxlsnr cli listener, uevent listener/dispatcher, checker loop and
 * optionally the dmevents waiter), then loops servicing
 * DAEMON_CONFIGURE requests until a DAEMON_SHUTDOWN transition.
 *
 * Never returns: calls exit(0) on clean shutdown, exit(1) on failure.
 */
static int
child (__attribute__((unused)) void *param)
{
	pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
	pthread_attr_t log_attr, misc_attr, uevent_attr;
	struct vectors * vecs;
	struct multipath * mpp;
	int i;
#ifdef USE_SYSTEMD
	unsigned long checkint;
	int startup_done = 0;
#endif
	int rc;
	int pid_fd = -1;
	struct config *conf;
	char *envp;
	int queue_without_daemon;
	enum daemon_status state;

	/* keep the daemon resident: checkers must run under memory pressure */
	mlockall(MCL_CURRENT | MCL_FUTURE);
	signal_init();
	rcu_init();

	setup_thread_attr(&misc_attr, 64 * 1024, 0);
	setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
	setup_thread_attr(&waiter_attr, 32 * 1024, 1);
	setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);

	/* logsink == 1: asynchronous logging through the log thread */
	if (logsink == 1) {
		setup_thread_attr(&log_attr, 64 * 1024, 0);
		log_thread_start(&log_attr);
		pthread_attr_destroy(&log_attr);
	}
	pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
	if (pid_fd < 0) {
		condlog(1, "failed to create pidfile");
		if (logsink == 1)
			log_thread_stop();
		exit(1);
	}

	post_config_state(DAEMON_START);

	condlog(2, "--------start up--------");
	condlog(2, "read " DEFAULT_CONFIGFILE);

	conf = load_config(DEFAULT_CONFIGFILE);
	if (!conf)
		goto failed;

	/* command-line options override multipath.conf settings */
	if (verbosity)
		conf->verbosity = verbosity;
	if (bindings_read_only)
		conf->bindings_read_only = bindings_read_only;
	uxsock_timeout = conf->uxsock_timeout;
	rcu_assign_pointer(multipath_conf, conf);
	if (init_checkers(conf->multipath_dir)) {
		condlog(0, "failed to initialize checkers");
		goto failed;
	}
	if (init_prio(conf->multipath_dir)) {
		condlog(0, "failed to initialize prioritizers");
		goto failed;
	}
	/* Failing this is non-fatal */

	init_foreign(conf->multipath_dir, conf->enable_foreign);

	if (poll_dmevents)
		poll_dmevents = dmevent_poll_supported();
	setlogmask(LOG_UPTO(conf->verbosity + 3));

	envp = getenv("LimitNOFILE");

	if (envp)
		condlog(2,"Using systemd provided open fds limit of %s", envp);
	else
		set_max_fds(conf->max_fds);

	vecs = gvecs = init_vecs();
	if (!vecs)
		goto failed;

	setscheduler();
	set_oom_adj();

#ifdef USE_SYSTEMD
	/* adapt the checker interval to the systemd watchdog timeout */
	envp = getenv("WATCHDOG_USEC");
	if (envp && sscanf(envp, "%lu", &checkint) == 1) {
		/* Value is in microseconds */
		conf->max_checkint = checkint / 1000000;
		/* Rescale checkint */
		if (conf->checkint > conf->max_checkint)
			conf->checkint = conf->max_checkint;
		else
			conf->checkint = conf->max_checkint / 4;
		condlog(3, "enabling watchdog, interval %d max %d",
			conf->checkint, conf->max_checkint);
		use_watchdog = conf->checkint;
	}
#endif
	/*
	 * Startup done, invalidate configuration
	 */
	conf = NULL;

	/* Start the cli listener and wait (under config_lock) until it
	 * has either come up (DAEMON_CONFIGURE) or failed. */
	pthread_cleanup_push(config_cleanup, NULL);
	pthread_mutex_lock(&config_lock);

	__post_config_state(DAEMON_IDLE);
	rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
	if (!rc) {
		/* Wait for uxlsnr startup */
		while (running_state == DAEMON_IDLE)
			pthread_cond_wait(&config_cond, &config_lock);
		state = running_state;
	}
	pthread_cleanup_pop(1);

	if (rc) {
		condlog(0, "failed to create cli listener: %d", rc);
		goto failed;
	}
	else if (state != DAEMON_CONFIGURE) {
		condlog(0, "cli listener failed to start");
		goto failed;
	}

	if (poll_dmevents) {
		if (init_dmevent_waiter(vecs)) {
			condlog(0, "failed to allocate dmevents waiter info");
			goto failed;
		}
		if ((rc = pthread_create(&dmevent_thr, &misc_attr,
					 wait_dmevents, NULL))) {
			condlog(0, "failed to create dmevent waiter thread: %d",
				rc);
			goto failed;
		}
	}

	/*
	 * Start uevent listener early to catch events
	 */
	if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
		condlog(0, "failed to create uevent thread: %d", rc);
		goto failed;
	}
	pthread_attr_destroy(&uevent_attr);

	/*
	 * start threads
	 */
	if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
		condlog(0,"failed to create checker loop thread: %d", rc);
		goto failed;
	}
	if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
		condlog(0, "failed to create uevent dispatcher: %d", rc);
		goto failed;
	}
	pthread_attr_destroy(&misc_attr);

	/* main service loop: wait for CONFIGURE or SHUTDOWN transitions */
	while (1) {
		pthread_cleanup_push(config_cleanup, NULL);
		pthread_mutex_lock(&config_lock);
		while (running_state != DAEMON_CONFIGURE &&
		       running_state != DAEMON_SHUTDOWN)
			pthread_cond_wait(&config_cond, &config_lock);
		state = running_state;
		pthread_cleanup_pop(1);
		if (state == DAEMON_SHUTDOWN)
			break;
		if (state == DAEMON_CONFIGURE) {
			pthread_cleanup_push(cleanup_lock, &vecs->lock);
			lock(&vecs->lock);
			pthread_testcancel();
			if (!need_to_delay_reconfig(vecs)) {
				reconfigure(vecs);
			} else {
				/* maps still waiting for udev: retry later */
				conf = get_multipath_config();
				conf->delayed_reconfig = 1;
				put_multipath_config(conf);
			}
			lock_cleanup_pop(vecs->lock);
			post_config_state(DAEMON_IDLE);
#ifdef USE_SYSTEMD
			/* tell systemd we are up after the first configure */
			if (!startup_done) {
				sd_notify(0, "READY=1");
				startup_done = 1;
			}
#endif
		}
	}

	/* shutdown: optionally disable queueing, then tear everything down */
	lock(&vecs->lock);
	conf = get_multipath_config();
	queue_without_daemon = conf->queue_without_daemon;
	put_multipath_config(conf);
	if (queue_without_daemon == QUE_NO_DAEMON_OFF)
		vector_foreach_slot(vecs->mpvec, mpp, i)
			dm_queue_if_no_path(mpp->alias, 0);
	remove_maps_and_stop_waiters(vecs);
	unlock(&vecs->lock);

	pthread_cancel(check_thr);
	pthread_cancel(uevent_thr);
	pthread_cancel(uxlsnr_thr);
	pthread_cancel(uevq_thr);
	if (poll_dmevents)
		pthread_cancel(dmevent_thr);

	pthread_join(check_thr, NULL);
	pthread_join(uevent_thr, NULL);
	pthread_join(uxlsnr_thr, NULL);
	pthread_join(uevq_thr, NULL);
	if (poll_dmevents)
		pthread_join(dmevent_thr, NULL);

	stop_io_err_stat_thread();

	lock(&vecs->lock);
	free_pathvec(vecs->pathvec, FREE_PATHS);
	vecs->pathvec = NULL;
	unlock(&vecs->lock);

	pthread_mutex_destroy(&vecs->lock.mutex);
	FREE(vecs);
	vecs = NULL;

	cleanup_foreign();
	cleanup_checkers();
	cleanup_prio();
	if (poll_dmevents)
		cleanup_dmevent_waiter();

	dm_lib_release();
	dm_lib_exit();

	/* We're done here */
	condlog(3, "unlink pidfile");
	unlink(DEFAULT_PIDFILE);

	condlog(2, "--------shut down-------");

	if (logsink == 1)
		log_thread_stop();

	/*
	 * Freeing config must be done after condlog() and dm_lib_exit(),
	 * because logging functions like dlog() and dm_write_log()
	 * reference the config.
	 */
	conf = rcu_dereference(multipath_conf);
	rcu_assign_pointer(multipath_conf, NULL);
	call_rcu(&conf->rcu, rcu_free_config);
	udev_unref(udev);
	udev = NULL;
	pthread_attr_destroy(&waiter_attr);
	pthread_attr_destroy(&io_err_stat_attr);
#ifdef _DEBUG_
	dbg_free_final(NULL);
#endif

#ifdef USE_SYSTEMD
	sd_notify(0, "ERRNO=0");
#endif
	exit(0);

failed:
#ifdef USE_SYSTEMD
	sd_notify(0, "ERRNO=1");
#endif
	if (pid_fd >= 0)
		close(pid_fd);
	exit(1);
}
3076
3077 static int
3078 daemonize(void)
3079 {
3080         int pid;
3081         int dev_null_fd;
3082
3083         if( (pid = fork()) < 0){
3084                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
3085                 return -1;
3086         }
3087         else if (pid != 0)
3088                 return pid;
3089
3090         setsid();
3091
3092         if ( (pid = fork()) < 0)
3093                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
3094         else if (pid != 0)
3095                 _exit(0);
3096
3097         if (chdir("/") < 0)
3098                 fprintf(stderr, "cannot chdir to '/', continuing\n");
3099
3100         dev_null_fd = open("/dev/null", O_RDWR);
3101         if (dev_null_fd < 0){
3102                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
3103                         strerror(errno));
3104                 _exit(0);
3105         }
3106
3107         close(STDIN_FILENO);
3108         if (dup(dev_null_fd) < 0) {
3109                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
3110                         strerror(errno));
3111                 _exit(0);
3112         }
3113         close(STDOUT_FILENO);
3114         if (dup(dev_null_fd) < 0) {
3115                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
3116                         strerror(errno));
3117                 _exit(0);
3118         }
3119         close(STDERR_FILENO);
3120         if (dup(dev_null_fd) < 0) {
3121                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
3122                         strerror(errno));
3123                 _exit(0);
3124         }
3125         close(dev_null_fd);
3126         daemon_pid = getpid();
3127         return 0;
3128 }
3129
3130 int
3131 main (int argc, char *argv[])
3132 {
3133         extern char *optarg;
3134         extern int optind;
3135         int arg;
3136         int err;
3137         int foreground = 0;
3138         struct config *conf;
3139
3140         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
3141                                    "Manipulated through RCU");
3142         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
3143                 "Suppress complaints about this scalar variable");
3144
3145         logsink = 1;
3146
3147         if (getuid() != 0) {
3148                 fprintf(stderr, "need to be root\n");
3149                 exit(1);
3150         }
3151
3152         /* make sure we don't lock any path */
3153         if (chdir("/") < 0)
3154                 fprintf(stderr, "can't chdir to root directory : %s\n",
3155                         strerror(errno));
3156         umask(umask(077) | 022);
3157
3158         pthread_cond_init_mono(&config_cond);
3159
3160         udev = udev_new();
3161         libmp_udev_set_sync_support(0);
3162
3163         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
3164                 switch(arg) {
3165                 case 'd':
3166                         foreground = 1;
3167                         if (logsink > 0)
3168                                 logsink = 0;
3169                         //debug=1; /* ### comment me out ### */
3170                         break;
3171                 case 'v':
3172                         if (sizeof(optarg) > sizeof(char *) ||
3173                             !isdigit(optarg[0]))
3174                                 exit(1);
3175
3176                         verbosity = atoi(optarg);
3177                         break;
3178                 case 's':
3179                         logsink = -1;
3180                         break;
3181                 case 'k':
3182                         logsink = 0;
3183                         conf = load_config(DEFAULT_CONFIGFILE);
3184                         if (!conf)
3185                                 exit(1);
3186                         if (verbosity)
3187                                 conf->verbosity = verbosity;
3188                         uxsock_timeout = conf->uxsock_timeout;
3189                         err = uxclnt(optarg, uxsock_timeout + 100);
3190                         free_config(conf);
3191                         return err;
3192                 case 'B':
3193                         bindings_read_only = 1;
3194                         break;
3195                 case 'n':
3196                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3197                         break;
3198                 case 'w':
3199                         poll_dmevents = 0;
3200                         break;
3201                 default:
3202                         fprintf(stderr, "Invalid argument '-%c'\n",
3203                                 optopt);
3204                         exit(1);
3205                 }
3206         }
3207         if (optind < argc) {
3208                 char cmd[CMDSIZE];
3209                 char * s = cmd;
3210                 char * c = s;
3211
3212                 logsink = 0;
3213                 conf = load_config(DEFAULT_CONFIGFILE);
3214                 if (!conf)
3215                         exit(1);
3216                 if (verbosity)
3217                         conf->verbosity = verbosity;
3218                 uxsock_timeout = conf->uxsock_timeout;
3219                 memset(cmd, 0x0, CMDSIZE);
3220                 while (optind < argc) {
3221                         if (strchr(argv[optind], ' '))
3222                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3223                         else
3224                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3225                         optind++;
3226                 }
3227                 c += snprintf(c, s + CMDSIZE - c, "\n");
3228                 err = uxclnt(s, uxsock_timeout + 100);
3229                 free_config(conf);
3230                 return err;
3231         }
3232
3233         if (foreground) {
3234                 if (!isatty(fileno(stdout)))
3235                         setbuf(stdout, NULL);
3236                 err = 0;
3237                 daemon_pid = getpid();
3238         } else
3239                 err = daemonize();
3240
3241         if (err < 0)
3242                 /* error */
3243                 exit(1);
3244         else if (err > 0)
3245                 /* parent dies */
3246                 exit(0);
3247         else
3248                 /* child lives */
3249                 return (child(NULL));
3250 }
3251
3252 void *  mpath_pr_event_handler_fn (void * pathp )
3253 {
3254         struct multipath * mpp;
3255         unsigned int i;
3256         int ret, isFound;
3257         struct path * pp = (struct path *)pathp;
3258         struct prout_param_descriptor *param;
3259         struct prin_resp *resp;
3260
3261         rcu_register_thread();
3262         mpp = pp->mpp;
3263
3264         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3265         if (!resp){
3266                 condlog(0,"%s Alloc failed for prin response", pp->dev);
3267                 goto out;
3268         }
3269
3270         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3271         if (ret != MPATH_PR_SUCCESS )
3272         {
3273                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
3274                 goto out;
3275         }
3276
3277         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
3278                         resp->prin_descriptor.prin_readkeys.additional_length );
3279
3280         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
3281         {
3282                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
3283                 ret = MPATH_PR_SUCCESS;
3284                 goto out;
3285         }
3286         condlog(2, "Multipath  reservation_key: 0x%" PRIx64 " ",
3287                 get_be64(mpp->reservation_key));
3288
3289         isFound =0;
3290         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
3291         {
3292                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
3293                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
3294                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
3295                 {
3296                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3297                         isFound =1;
3298                         break;
3299                 }
3300         }
3301         if (!isFound)
3302         {
3303                 condlog(0, "%s: Either device not registered or ", pp->dev);
3304                 condlog(0, "host is not authorised for registration. Skip path");
3305                 ret = MPATH_PR_OTHER;
3306                 goto out;
3307         }
3308
3309         param= malloc(sizeof(struct prout_param_descriptor));
3310         memset(param, 0 , sizeof(struct prout_param_descriptor));
3311         param->sa_flags = mpp->sa_flags;
3312         memcpy(param->sa_key, &mpp->reservation_key, 8);
3313         param->num_transportid = 0;
3314
3315         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3316
3317         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3318         if (ret != MPATH_PR_SUCCESS )
3319         {
3320                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
3321         }
3322         mpp->prflag = 1;
3323
3324         free(param);
3325 out:
3326         if (resp)
3327                 free(resp);
3328         rcu_unregister_thread();
3329         return NULL;
3330 }
3331
3332 int mpath_pr_event_handle(struct path *pp)
3333 {
3334         pthread_t thread;
3335         int rc;
3336         pthread_attr_t attr;
3337         struct multipath * mpp;
3338
3339         if (pp->bus != SYSFS_BUS_SCSI)
3340                 return 0;
3341
3342         mpp = pp->mpp;
3343
3344         if (!get_be64(mpp->reservation_key))
3345                 return -1;
3346
3347         pthread_attr_init(&attr);
3348         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3349
3350         rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
3351         if (rc) {
3352                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3353                 return -1;
3354         }
3355         pthread_attr_destroy(&attr);
3356         rc = pthread_join(thread, NULL);
3357         return 0;
3358 }