multipathd: simplify retry logic in ev_add_path()
multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69 #include "uxsock.h"
70
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
73
74 #include "prioritizers/alua_rtpg.h"
75
76 #include "main.h"
77 #include "pidfile.h"
78 #include "uxlsnr.h"
79 #include "uxclnt.h"
80 #include "cli.h"
81 #include "cli_handlers.h"
82 #include "lock.h"
83 #include "waiter.h"
84 #include "dmevents.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
93 #define LOG_MSG(lvl, verb, pp)                                  \
94 do {                                                            \
95         if (lvl <= verb) {                                      \
96                 if (pp->offline)                                \
97                         condlog(lvl, "%s: %s - path offline",   \
98                                 pp->mpp->alias, pp->dev);       \
99                 else  {                                         \
100                         const char *__m =                       \
101                                 checker_message(&pp->checker);  \
102                                                                 \
103                         if (strlen(__m))                              \
104                                 condlog(lvl, "%s: %s - %s checker%s", \
105                                         pp->mpp->alias,               \
106                                         pp->dev,                      \
107                                         checker_name(&pp->checker),   \
108                                         __m);                         \
109                 }                                                     \
110         }                                                             \
111 } while(0)
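/*
 * Illustrative use only: a call such as LOG_MSG(2, verbosity, pp)
 * (assuming the caller passes the current verbosity level) logs either
 * "<alias>: <dev> - path offline" or the current checker message for
 * the path, and logs nothing when the checker message is empty.
 */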
112
113 struct mpath_event_param
114 {
115         char * devname;
116         struct multipath *mpp;
117 };
118
119 int logsink;
120 int uxsock_timeout;
121 int verbosity;
122 int bindings_read_only;
123 int ignore_new_devs;
124 #ifdef NO_DMEVENTS_POLL
125 int poll_dmevents = 0;
126 #else
127 int poll_dmevents = 1;
128 #endif
129 enum daemon_status running_state = DAEMON_INIT;
130 pid_t daemon_pid;
131 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
132 pthread_cond_t config_cond;
133
134 /*
135  * global copy of vecs for use in sig handlers
136  */
137 struct vectors * gvecs;
138
139 struct udev * udev;
140
141 struct config *multipath_conf;
142
143 /* Local variables */
144 static volatile sig_atomic_t exit_sig;
145 static volatile sig_atomic_t reconfig_sig;
146 static volatile sig_atomic_t log_reset_sig;
147
148 const char *
149 daemon_status(void)
150 {
151         switch (running_state) {
152         case DAEMON_INIT:
153                 return "init";
154         case DAEMON_START:
155                 return "startup";
156         case DAEMON_CONFIGURE:
157                 return "configure";
158         case DAEMON_IDLE:
159                 return "idle";
160         case DAEMON_RUNNING:
161                 return "running";
162         case DAEMON_SHUTDOWN:
163                 return "shutdown";
164         }
165         return NULL;
166 }
167
168 /*
169  * I love you too, systemd ...
170  */
171 const char *
172 sd_notify_status(void)
173 {
174         switch (running_state) {
175         case DAEMON_INIT:
176                 return "STATUS=init";
177         case DAEMON_START:
178                 return "STATUS=startup";
179         case DAEMON_CONFIGURE:
180                 return "STATUS=configure";
181         case DAEMON_IDLE:
182         case DAEMON_RUNNING:
183                 return "STATUS=up";
184         case DAEMON_SHUTDOWN:
185                 return "STATUS=shutdown";
186         }
187         return NULL;
188 }
189
190 #ifdef USE_SYSTEMD
191 static void do_sd_notify(enum daemon_status old_state)
192 {
193         /*
194          * Checkerloop switches back and forth between idle and running state.
195          * No need to tell systemd each time.
196          * These notifications cause a lot of overhead on dbus.
197          */
198         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
199             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
200                 return;
201         sd_notify(0, sd_notify_status());
202 }
203 #endif
204
205 static void config_cleanup(void *arg)
206 {
207         pthread_mutex_unlock(&config_lock);
208 }
209
210 static void __post_config_state(enum daemon_status state)
211 {
212         if (state != running_state && running_state != DAEMON_SHUTDOWN) {
213                 enum daemon_status old_state = running_state;
214
215                 running_state = state;
216                 pthread_cond_broadcast(&config_cond);
217 #ifdef USE_SYSTEMD
218                 do_sd_notify(old_state);
219 #endif
220         }
221 }
222
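/*
 * Switch the daemon state right away (unless we are shutting down) and
 * wake up all threads waiting on config_cond.
 */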
223 void post_config_state(enum daemon_status state)
224 {
225         pthread_mutex_lock(&config_lock);
226         pthread_cleanup_push(config_cleanup, NULL);
227         __post_config_state(state);
228         pthread_cleanup_pop(1);
229 }
230
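/*
 * Like post_config_state(), but if the daemon is neither idle nor
 * shutting down, wait up to one second on config_cond before switching.
 * Returns 0 on success, EINVAL during shutdown, or ETIMEDOUT if the
 * wait timed out.
 */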
231 int set_config_state(enum daemon_status state)
232 {
233         int rc = 0;
234
235         pthread_cleanup_push(config_cleanup, NULL);
236         pthread_mutex_lock(&config_lock);
237         if (running_state != state) {
238                 enum daemon_status old_state = running_state;
239
240                 if (running_state == DAEMON_SHUTDOWN)
241                         rc = EINVAL;
242                 else if (running_state != DAEMON_IDLE) {
243                         struct timespec ts;
244
245                         clock_gettime(CLOCK_MONOTONIC, &ts);
246                         ts.tv_sec += 1;
247                         rc = pthread_cond_timedwait(&config_cond,
248                                                     &config_lock, &ts);
249                 }
250                 if (!rc) {
251                         running_state = state;
252                         pthread_cond_broadcast(&config_cond);
253 #ifdef USE_SYSTEMD
254                         do_sd_notify(old_state);
255 #endif
256                 }
257         }
258         pthread_cleanup_pop(1);
259         return rc;
260 }
261
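/*
 * get_multipath_config() takes the RCU read lock. Every call must be
 * paired with put_multipath_config(), as done throughout this file:
 *
 *	conf = get_multipath_config();
 *	pthread_cleanup_push(put_multipath_config, conf);
 *	... use conf ...
 *	pthread_cleanup_pop(1);
 */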
262 struct config *get_multipath_config(void)
263 {
264         rcu_read_lock();
265         return rcu_dereference(multipath_conf);
266 }
267
268 void put_multipath_config(void *arg)
269 {
270         rcu_read_unlock();
271 }
272
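/*
 * Decide whether the map should switch to another path group,
 * optionally refreshing path priorities first. Unless failback is
 * manual, mpp->bestpg is updated and 1 is returned if it differs from
 * the path group the kernel will use next.
 */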
273 static int
274 need_switch_pathgroup (struct multipath * mpp, int refresh)
275 {
276         struct pathgroup * pgp;
277         struct path * pp;
278         unsigned int i, j;
279         struct config *conf;
280         int bestpg;
281
282         if (!mpp)
283                 return 0;
284
285         /*
286          * Refresh path priority values
287          */
288         if (refresh) {
289                 vector_foreach_slot (mpp->pg, pgp, i) {
290                         vector_foreach_slot (pgp->paths, pp, j) {
291                                 conf = get_multipath_config();
292                                 pthread_cleanup_push(put_multipath_config,
293                                                      conf);
294                                 pathinfo(pp, conf, DI_PRIO);
295                                 pthread_cleanup_pop(1);
296                         }
297                 }
298         }
299
300         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
301                 return 0;
302
303         bestpg = select_path_group(mpp);
304         if (mpp->pgfailback == -FAILBACK_MANUAL)
305                 return 0;
306
307         mpp->bestpg = bestpg;
308         if (mpp->bestpg != mpp->nextpg)
309                 return 1;
310
311         return 0;
312 }
313
314 static void
315 switch_pathgroup (struct multipath * mpp)
316 {
317         mpp->stat_switchgroup++;
318         dm_switchgroup(mpp->alias, mpp->bestpg);
319         condlog(2, "%s: switch to path group #%i",
320                  mpp->alias, mpp->bestpg);
321 }
322
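/*
 * Arrange for map events to be monitored: either register the map with
 * the dmevents polling code or start a dedicated waiter thread.
 */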
323 static int
324 wait_for_events(struct multipath *mpp, struct vectors *vecs)
325 {
326         if (poll_dmevents)
327                 return watch_dmevents(mpp->alias);
328         else
329                 return start_waiter_thread(mpp, vecs);
330 }
331
332 static void
333 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
334 {
335         /* devices are automatically removed by the dmevent polling code,
336          * so they don't need to be manually removed here */
337         condlog(3, "%s: removing map from internal tables", mpp->alias);
338         if (!poll_dmevents)
339                 stop_waiter_thread(mpp, vecs);
340         remove_map(mpp, vecs, PURGE_VEC);
341 }
342
343 static void
344 remove_maps_and_stop_waiters(struct vectors *vecs)
345 {
346         int i;
347         struct multipath * mpp;
348
349         if (!vecs)
350                 return;
351
352         if (!poll_dmevents) {
353                 vector_foreach_slot(vecs->mpvec, mpp, i)
354                         stop_waiter_thread(mpp, vecs);
355         }
356         else
357                 unwatch_all_dmevents();
358
359         remove_maps(vecs);
360 }
361
362 static void
363 set_multipath_wwid (struct multipath * mpp)
364 {
365         if (strlen(mpp->wwid))
366                 return;
367
368         dm_get_uuid(mpp->alias, mpp->wwid);
369 }
370
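/*
 * Sync the kernel's queue_if_no_path setting with the configured
 * no_path_retry policy. For a numeric retry count, queueing stays
 * enabled while usable paths remain; once all paths are down and the
 * map is still queueing, recovery mode starts the retry countdown.
 */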
371 static void set_no_path_retry(struct multipath *mpp)
372 {
373         char is_queueing = 0;
374
375         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
376         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
377                 is_queueing = 1;
378
379         switch (mpp->no_path_retry) {
380         case NO_PATH_RETRY_UNDEF:
381                 break;
382         case NO_PATH_RETRY_FAIL:
383                 if (is_queueing)
384                         dm_queue_if_no_path(mpp->alias, 0);
385                 break;
386         case NO_PATH_RETRY_QUEUE:
387                 if (!is_queueing)
388                         dm_queue_if_no_path(mpp->alias, 1);
389                 break;
390         default:
391                 if (mpp->nr_active > 0) {
392                         mpp->retry_tick = 0;
393                         dm_queue_if_no_path(mpp->alias, 1);
394                 } else if (is_queueing && mpp->retry_tick == 0)
395                         enter_recovery_mode(mpp);
396                 break;
397         }
398 }
399
400 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
401                       int reset)
402 {
403         if (dm_get_info(mpp->alias, &mpp->dmi)) {
404                 /* Error accessing table */
405                 condlog(3, "%s: cannot access table", mpp->alias);
406                 goto out;
407         }
408
409         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
410                 condlog(0, "%s: failed to setup multipath", mpp->alias);
411                 goto out;
412         }
413
414         if (reset) {
415                 set_no_path_retry(mpp);
416                 if (VECTOR_SIZE(mpp->paths) != 0)
417                         dm_cancel_deferred_remove(mpp);
418         }
419
420         return 0;
421 out:
422         remove_map_and_stop_waiter(mpp, vecs);
423         return 1;
424 }
425
426 int update_multipath (struct vectors *vecs, char *mapname, int reset)
427 {
428         struct multipath *mpp;
429         struct pathgroup  *pgp;
430         struct path *pp;
431         int i, j;
432
433         mpp = find_mp_by_alias(vecs->mpvec, mapname);
434
435         if (!mpp) {
436                 condlog(3, "%s: multipath map not found", mapname);
437                 return 2;
438         }
439
440         if (__setup_multipath(vecs, mpp, reset))
441                 return 1; /* mpp freed in setup_multipath */
442
443         /*
444          * compare checkers states with DM states
445          */
446         vector_foreach_slot (mpp->pg, pgp, i) {
447                 vector_foreach_slot (pgp->paths, pp, j) {
448                         if (pp->dmstate != PSTATE_FAILED)
449                                 continue;
450
451                         if (pp->state != PATH_DOWN) {
452                                 struct config *conf;
453                                 int oldstate = pp->state;
454                                 int checkint;
455
456                                 conf = get_multipath_config();
457                                 checkint = conf->checkint;
458                                 put_multipath_config(conf);
459                                 condlog(2, "%s: mark as failed", pp->dev);
460                                 mpp->stat_path_failures++;
461                                 pp->state = PATH_DOWN;
462                                 if (oldstate == PATH_UP ||
463                                     oldstate == PATH_GHOST)
464                                         update_queue_mode_del_path(mpp);
465
466                                 /*
467                                  * if opportune,
468                                  * schedule the next check earlier
469                                  */
470                                 if (pp->tick > checkint)
471                                         pp->tick = checkint;
472                         }
473                 }
474         }
475         return 0;
476 }
477
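/*
 * Re-adopt paths and reload an existing map, retrying a failed reload
 * up to three times. For a newly created map (new_map != 0), an
 * unrecoverable failure removes the map again.
 */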
478 static int
479 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
480 {
481         int retries = 3;
482         char params[PARAMS_SIZE] = {0};
483
484 retry:
485         condlog(4, "%s: updating new map", mpp->alias);
486         if (adopt_paths(vecs->pathvec, mpp)) {
487                 condlog(0, "%s: failed to adopt paths for new map update",
488                         mpp->alias);
489                 retries = -1;
490                 goto fail;
491         }
492         verify_paths(mpp, vecs);
493         mpp->action = ACT_RELOAD;
494
495         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
496                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
497                 retries = -1;
498                 goto fail;
499         }
500         if (domap(mpp, params, 1) == DOMAP_FAIL && retries-- > 0) {
501                 condlog(0, "%s: update_map sleep", mpp->alias);
502                 sleep(1);
503                 goto retry;
504         }
505         dm_lib_release();
506
507 fail:
508         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
509                 condlog(0, "%s: failed to create new map", mpp->alias);
510                 remove_map(mpp, vecs, 1);
511                 return 1;
512         }
513
514         if (setup_multipath(vecs, mpp))
515                 return 1;
516
517         sync_map_state(mpp);
518
519         if (retries < 0)
520                 condlog(0, "%s: failed reload in new map update", mpp->alias);
521         return 0;
522 }
523
524 static struct multipath *
525 add_map_without_path (struct vectors *vecs, const char *alias)
526 {
527         struct multipath * mpp = alloc_multipath();
528         struct config *conf;
529
530         if (!mpp)
531                 return NULL;
532         if (!alias) {
533                 FREE(mpp);
534                 return NULL;
535         }
536
537         mpp->alias = STRDUP(alias);
538
539         if (dm_get_info(mpp->alias, &mpp->dmi)) {
540                 condlog(3, "%s: cannot access table", mpp->alias);
541                 goto out;
542         }
543         set_multipath_wwid(mpp);
544         conf = get_multipath_config();
545         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
546         put_multipath_config(conf);
547
548         if (update_multipath_table(mpp, vecs->pathvec, 1))
549                 goto out;
550         if (update_multipath_status(mpp))
551                 goto out;
552
553         if (!vector_alloc_slot(vecs->mpvec))
554                 goto out;
555
556         vector_set_slot(vecs->mpvec, mpp);
557
558         if (update_map(mpp, vecs, 1) != 0) /* map removed */
559                 return NULL;
560
561         return mpp;
562 out:
563         remove_map(mpp, vecs, PURGE_VEC);
564         return NULL;
565 }
566
567 static int
568 coalesce_maps(struct vectors *vecs, vector nmpv)
569 {
570         struct multipath * ompp;
571         vector ompv = vecs->mpvec;
572         unsigned int i, reassign_maps;
573         struct config *conf;
574
575         conf = get_multipath_config();
576         reassign_maps = conf->reassign_maps;
577         put_multipath_config(conf);
578         vector_foreach_slot (ompv, ompp, i) {
579                 condlog(3, "%s: coalesce map", ompp->alias);
580                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
581                         /*
582                          * remove all current maps not allowed by the
583                          * current configuration
584                          */
585                         if (dm_flush_map(ompp->alias)) {
586                                 condlog(0, "%s: unable to flush devmap",
587                                         ompp->alias);
588                                 /*
589                                  * may be just because the device is open
590                                  */
591                                 if (setup_multipath(vecs, ompp) != 0) {
592                                         i--;
593                                         continue;
594                                 }
595                                 if (!vector_alloc_slot(nmpv))
596                                         return 1;
597
598                                 vector_set_slot(nmpv, ompp);
599
600                                 vector_del_slot(ompv, i);
601                                 i--;
602                         }
603                         else {
604                                 dm_lib_release();
605                                 condlog(2, "%s devmap removed", ompp->alias);
606                         }
607                 } else if (reassign_maps) {
608                         condlog(3, "%s: Reassign existing device-mapper"
609                                 " devices", ompp->alias);
610                         dm_reassign(ompp->alias);
611                 }
612         }
613         return 0;
614 }
615
616 static void
617 sync_maps_state(vector mpvec)
618 {
619         unsigned int i;
620         struct multipath *mpp;
621
622         vector_foreach_slot (mpvec, mpp, i)
623                 sync_map_state(mpp);
624 }
625
626 static int
627 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
628 {
629         int r;
630
631         if (nopaths)
632                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
633         else
634                 r = dm_flush_map(mpp->alias);
635         /*
636          * clear references to this map so we can ignore the spurious
637          * uevent we may have generated with the dm_flush_map call above
638          */
639         if (r) {
640                 /*
641                  * May not really be an error -- if the map was already flushed
642                  * from the device mapper by dmsetup(8) for instance.
643                  */
644                 if (r == 1)
645                         condlog(0, "%s: can't flush", mpp->alias);
646                 else {
647                         condlog(2, "%s: devmap deferred remove", mpp->alias);
648                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
649                 }
650                 return r;
651         }
652         else {
653                 dm_lib_release();
654                 condlog(2, "%s: map flushed", mpp->alias);
655         }
656
657         orphan_paths(vecs->pathvec, mpp, "map flushed");
658         remove_map_and_stop_waiter(mpp, vecs);
659
660         return 0;
661 }
662
663 static int
664 uev_add_map (struct uevent * uev, struct vectors * vecs)
665 {
666         char *alias;
667         int major = -1, minor = -1, rc;
668
669         condlog(3, "%s: add map (uevent)", uev->kernel);
670         alias = uevent_get_dm_name(uev);
671         if (!alias) {
672                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
673                 major = uevent_get_major(uev);
674                 minor = uevent_get_minor(uev);
675                 alias = dm_mapname(major, minor);
676                 if (!alias) {
677                         condlog(2, "%s: mapname not found for %d:%d",
678                                 uev->kernel, major, minor);
679                         return 1;
680                 }
681         }
682         pthread_cleanup_push(cleanup_lock, &vecs->lock);
683         lock(&vecs->lock);
684         pthread_testcancel();
685         rc = ev_add_map(uev->kernel, alias, vecs);
686         lock_cleanup_pop(vecs->lock);
687         FREE(alias);
688         return rc;
689 }
690
691 /*
692  * ev_add_map expects that the multipath device already exists in the kernel
693  * before it is called. It just adds a device to multipathd or updates an
694  * existing device.
695  */
696 int
697 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
698 {
699         struct multipath * mpp;
700         int delayed_reconfig, reassign_maps;
701         struct config *conf;
702
703         if (dm_is_mpath(alias) != 1) {
704                 condlog(4, "%s: not a multipath map", alias);
705                 return 0;
706         }
707
708         mpp = find_mp_by_alias(vecs->mpvec, alias);
709
710         if (mpp) {
711                 if (mpp->wait_for_udev > 1) {
712                         condlog(2, "%s: performing delayed actions",
713                                 mpp->alias);
714                         if (update_map(mpp, vecs, 0))
715                                 /* setup_multipath removed the map */
716                                 return 1;
717                 }
718                 conf = get_multipath_config();
719                 delayed_reconfig = conf->delayed_reconfig;
720                 reassign_maps = conf->reassign_maps;
721                 put_multipath_config(conf);
722                 if (mpp->wait_for_udev) {
723                         mpp->wait_for_udev = 0;
724                         if (delayed_reconfig &&
725                             !need_to_delay_reconfig(vecs)) {
726                                 condlog(2, "reconfigure (delayed)");
727                                 set_config_state(DAEMON_CONFIGURE);
728                                 return 0;
729                         }
730                 }
731                 /*
732                  * Not really an error -- we generate our own uevent
733                  * if we create a multipath mapped device as a result
734                  * of uev_add_path
735                  */
736                 if (reassign_maps) {
737                         condlog(3, "%s: Reassign existing device-mapper devices",
738                                 alias);
739                         dm_reassign(alias);
740                 }
741                 return 0;
742         }
743         condlog(2, "%s: adding map", alias);
744
745         /*
746          * now we can register the map
747          */
748         if ((mpp = add_map_without_path(vecs, alias))) {
749                 sync_map_state(mpp);
750                 condlog(2, "%s: devmap %s registered", alias, dev);
751                 return 0;
752         } else {
753                 condlog(2, "%s: ev_add_map failed", dev);
754                 return 1;
755         }
756 }
757
758 static int
759 uev_remove_map (struct uevent * uev, struct vectors * vecs)
760 {
761         char *alias;
762         int minor;
763         struct multipath *mpp;
764
765         condlog(3, "%s: remove map (uevent)", uev->kernel);
766         alias = uevent_get_dm_name(uev);
767         if (!alias) {
768                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
769                 return 0;
770         }
771         minor = uevent_get_minor(uev);
772
773         pthread_cleanup_push(cleanup_lock, &vecs->lock);
774         lock(&vecs->lock);
775         pthread_testcancel();
776         mpp = find_mp_by_minor(vecs->mpvec, minor);
777
778         if (!mpp) {
779                 condlog(2, "%s: devmap not registered, can't remove",
780                         uev->kernel);
781                 goto out;
782         }
783         if (strcmp(mpp->alias, alias)) {
784                         condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
785                         uev->kernel, mpp->alias, alias);
786                 goto out;
787         }
788
789         remove_map_and_stop_waiter(mpp, vecs);
790 out:
791         lock_cleanup_pop(vecs->lock);
792         FREE(alias);
793         return 0;
794 }
795
796 /* Called from CLI handler */
797 int
798 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
799 {
800         struct multipath * mpp;
801
802         mpp = find_mp_by_minor(vecs->mpvec, minor);
803
804         if (!mpp) {
805                 condlog(2, "%s: devmap not registered, can't remove",
806                         devname);
807                 return 1;
808         }
809         if (strcmp(mpp->alias, alias)) {
810                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
811                         devname, mpp->alias, alias);
812                 return 1;
813         }
814         return flush_map(mpp, vecs, 0);
815 }
816
817 static int
818 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
819 {
820         struct path *pp;
821         int ret = 0, i;
822         struct config *conf;
823
824         condlog(3, "%s: add path (uevent)", uev->kernel);
825         if (strstr(uev->kernel, "..") != NULL) {
826                 /*
827                  * Don't allow relative device names in the pathvec
828                  */
829                 condlog(0, "%s: path name is invalid", uev->kernel);
830                 return 1;
831         }
832
833         pthread_cleanup_push(cleanup_lock, &vecs->lock);
834         lock(&vecs->lock);
835         pthread_testcancel();
836         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
837         if (pp) {
838                 int r;
839
840                 condlog(3, "%s: spurious uevent, path already in pathvec",
841                         uev->kernel);
842                 if (!pp->mpp && !strlen(pp->wwid)) {
843                         condlog(3, "%s: reinitialize path", uev->kernel);
844                         udev_device_unref(pp->udev);
845                         pp->udev = udev_device_ref(uev->udev);
846                         conf = get_multipath_config();
847                         pthread_cleanup_push(put_multipath_config, conf);
848                         r = pathinfo(pp, conf,
849                                      DI_ALL | DI_BLACKLIST);
850                         pthread_cleanup_pop(1);
851                         if (r == PATHINFO_OK)
852                                 ret = ev_add_path(pp, vecs, need_do_map);
853                         else if (r == PATHINFO_SKIPPED) {
854                                 condlog(3, "%s: remove blacklisted path",
855                                         uev->kernel);
856                                 i = find_slot(vecs->pathvec, (void *)pp);
857                                 if (i != -1)
858                                         vector_del_slot(vecs->pathvec, i);
859                                 free_path(pp);
860                         } else {
861                                 condlog(0, "%s: failed to reinitialize path",
862                                         uev->kernel);
863                                 ret = 1;
864                         }
865                 }
866         }
867         lock_cleanup_pop(vecs->lock);
868         if (pp)
869                 return ret;
870
871         /*
872          * get path vital state
873          */
874         conf = get_multipath_config();
875         pthread_cleanup_push(put_multipath_config, conf);
876         ret = alloc_path_with_pathinfo(conf, uev->udev,
877                                        uev->wwid, DI_ALL, &pp);
878         pthread_cleanup_pop(1);
879         if (!pp) {
880                 if (ret == PATHINFO_SKIPPED)
881                         return 0;
882                 condlog(3, "%s: failed to get path info", uev->kernel);
883                 return 1;
884         }
885         pthread_cleanup_push(cleanup_lock, &vecs->lock);
886         lock(&vecs->lock);
887         pthread_testcancel();
888         ret = store_path(vecs->pathvec, pp);
889         if (!ret) {
890                 conf = get_multipath_config();
891                 pp->checkint = conf->checkint;
892                 put_multipath_config(conf);
893                 ret = ev_add_path(pp, vecs, need_do_map);
894         } else {
895                 condlog(0, "%s: failed to store path info, "
896                         "dropping event",
897                         uev->kernel);
898                 free_path(pp);
899                 ret = 1;
900         }
901         lock_cleanup_pop(vecs->lock);
902         return ret;
903 }
904
905 /*
906  * returns:
907  * 0: added
908  * 1: error
909  */
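/*
 * Reload failures are retried: DOMAP_RETRY results are retried
 * directly, while DOMAP_FAIL on a reload triggers a rescan of the
 * paths before trying again; both cases share the same retry budget.
 */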
910 int
911 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
912 {
913         struct multipath * mpp;
914         char params[PARAMS_SIZE] = {0};
915         int retries = 3;
916         int start_waiter = 0;
917         int ret;
918
919         /*
920          * need path UID to go any further
921          */
922         if (strlen(pp->wwid) == 0) {
923                 condlog(0, "%s: failed to get path uid", pp->dev);
924                 goto fail; /* leave path added to pathvec */
925         }
926         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
927         if (mpp && pp->size && mpp->size != pp->size) {
928                 condlog(0, "%s: failed to add new path %s, device size mismatch", mpp->alias, pp->dev);
929                 int i = find_slot(vecs->pathvec, (void *)pp);
930                 if (i != -1)
931                         vector_del_slot(vecs->pathvec, i);
932                 free_path(pp);
933                 return 1;
934         }
935         if (mpp && mpp->wait_for_udev &&
936             (pathcount(mpp, PATH_UP) > 0 ||
937              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
938               mpp->ghost_delay_tick <= 0))) {
939                 /* if wait_for_udev is set and valid paths exist */
940                 condlog(3, "%s: delaying path addition until %s is fully initialized",
941                         pp->dev, mpp->alias);
942                 mpp->wait_for_udev = 2;
943                 orphan_path(pp, "waiting for create to complete");
944                 return 0;
945         }
946
947         pp->mpp = mpp;
948 rescan:
949         if (mpp) {
950                 condlog(4,"%s: adopting all paths for path %s",
951                         mpp->alias, pp->dev);
952                 if (adopt_paths(vecs->pathvec, mpp))
953                         goto fail; /* leave path added to pathvec */
954
955                 verify_paths(mpp, vecs);
956                 mpp->action = ACT_RELOAD;
957         } else {
958                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
959                         orphan_path(pp, "only one path");
960                         return 0;
961                 }
962                 condlog(4,"%s: creating new map", pp->dev);
963                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
964                         mpp->action = ACT_CREATE;
965                         /*
966                          * We don't depend on ACT_CREATE, as domap will
967                          * set it to ACT_NOTHING when complete.
968                          */
969                         start_waiter = 1;
970                 }
971                 if (!start_waiter)
972                         goto fail; /* leave path added to pathvec */
973         }
974
975         /* persistent reservation check */
976         mpath_pr_event_handle(pp);
977
978         if (!need_do_map)
979                 return 0;
980
981         if (!dm_map_present(mpp->alias)) {
982                 mpp->action = ACT_CREATE;
983                 start_waiter = 1;
984         }
985         /*
986          * push the map to the device-mapper
987          */
988         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
989                 condlog(0, "%s: failed to setup map for addition of new "
990                         "path %s", mpp->alias, pp->dev);
991                 goto fail_map;
992         }
993         /*
994          * reload the map for the multipath mapped device
995          */
996         ret = domap(mpp, params, 1);
997         while (ret == DOMAP_RETRY && retries-- > 0) {
998                 condlog(0, "%s: retry domap for addition of new "
999                         "path %s", mpp->alias, pp->dev);
1000                 sleep(1);
1001                 ret = domap(mpp, params, 1);
1002         }
1003         if (ret == DOMAP_FAIL || ret == DOMAP_RETRY) {
1004                 condlog(0, "%s: failed in domap for addition of new "
1005                         "path %s", mpp->alias, pp->dev);
1006                 /*
1007                  * deal with asynchronous uevents :((
1008                  */
1009                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1010                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
1011                         sleep(1);
1012                         update_mpp_paths(mpp, vecs->pathvec);
1013                         goto rescan;
1014                 }
1015                 else if (mpp->action == ACT_RELOAD)
1016                         condlog(0, "%s: giving up reload", mpp->alias);
1017                 else
1018                         goto fail_map;
1019         }
1020         dm_lib_release();
1021
1022         if ((mpp->action == ACT_CREATE ||
1023              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1024             wait_for_events(mpp, vecs))
1025                         goto fail_map;
1026
1027         /*
1028          * update our state from kernel regardless of create or reload
1029          */
1030         if (setup_multipath(vecs, mpp))
1031                 goto fail; /* if setup_multipath fails, it removes the map */
1032
1033         sync_map_state(mpp);
1034
1035         if (retries >= 0) {
1036                 condlog(2, "%s [%s]: path added to devmap %s",
1037                         pp->dev, pp->dev_t, mpp->alias);
1038                 return 0;
1039         } else
1040                 goto fail;
1041
1042 fail_map:
1043         remove_map(mpp, vecs, 1);
1044 fail:
1045         orphan_path(pp, "failed to add path");
1046         return 1;
1047 }
1048
1049 static int
1050 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1051 {
1052         struct path *pp;
1053         int ret;
1054
1055         condlog(3, "%s: remove path (uevent)", uev->kernel);
1056         delete_foreign(uev->udev);
1057
1058         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1059         lock(&vecs->lock);
1060         pthread_testcancel();
1061         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1062         if (pp)
1063                 ret = ev_remove_path(pp, vecs, need_do_map);
1064         lock_cleanup_pop(vecs->lock);
1065         if (!pp) {
1066                 /* Not an error; path might have been purged earlier */
1067                 condlog(0, "%s: path already removed", uev->kernel);
1068                 return 0;
1069         }
1070         return ret;
1071 }
1072
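/*
 * Remove a path from its map. If it was the last path, the map is
 * flushed (honoring flush_on_last_del); otherwise the map is reloaded
 * without the path. Returns 0 on success, 1 on error.
 */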
1073 int
1074 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1075 {
1076         struct multipath * mpp;
1077         int i, retval = 0;
1078         char params[PARAMS_SIZE] = {0};
1079
1080         /*
1081          * avoid referring to the map of an orphaned path
1082          */
1083         if ((mpp = pp->mpp)) {
1084                 /*
1085                  * transform the mp->pg vector of vectors of paths
1086                  * into a mp->params string to feed the device-mapper
1087                  */
1088                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1089                         condlog(0, "%s: failed to update paths",
1090                                 mpp->alias);
1091                         goto fail;
1092                 }
1093
1094                 /*
1095                  * Make sure mpp->hwe doesn't point to freed memory
1096                  * We call extract_hwe_from_path() below to restore mpp->hwe
1097                  */
1098                 if (mpp->hwe == pp->hwe)
1099                         mpp->hwe = NULL;
1100
1101                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1102                         vector_del_slot(mpp->paths, i);
1103
1104                 /*
1105                  * remove the map IF removing the last path
1106                  */
1107                 if (VECTOR_SIZE(mpp->paths) == 0) {
1108                         char alias[WWID_SIZE];
1109
1110                         /*
1111                          * flush_map will fail if the device is open
1112                          */
1113                         strlcpy(alias, mpp->alias, WWID_SIZE);
1114                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1115                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1116                                 mpp->retry_tick = 0;
1117                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1118                                 mpp->disable_queueing = 1;
1119                                 mpp->stat_map_failures++;
1120                                 dm_queue_if_no_path(mpp->alias, 0);
1121                         }
1122                         if (!flush_map(mpp, vecs, 1)) {
1123                                 condlog(2, "%s: removed map after"
1124                                         " removing all paths",
1125                                         alias);
1126                                 retval = 0;
1127                                 goto out;
1128                         }
1129                         /*
1130                          * Not an error, continue
1131                          */
1132                 }
1133
1134                 if (mpp->hwe == NULL)
1135                         extract_hwe_from_path(mpp);
1136
1137                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1138                         condlog(0, "%s: failed to setup map for"
1139                                 " removal of path %s", mpp->alias, pp->dev);
1140                         goto fail;
1141                 }
1142
1143                 if (mpp->wait_for_udev) {
1144                         mpp->wait_for_udev = 2;
1145                         goto out;
1146                 }
1147
1148                 if (!need_do_map)
1149                         goto out;
1150                 /*
1151                  * reload the map
1152                  */
1153                 mpp->action = ACT_RELOAD;
1154                 if (domap(mpp, params, 1) == DOMAP_FAIL) {
1155                         condlog(0, "%s: failed in domap for "
1156                                 "removal of path %s",
1157                                 mpp->alias, pp->dev);
1158                         retval = 1;
1159                 } else {
1160                         /*
1161                          * update our state from kernel
1162                          */
1163                         if (setup_multipath(vecs, mpp))
1164                                 return 1;
1165                         sync_map_state(mpp);
1166
1167                         condlog(2, "%s [%s]: path removed from map %s",
1168                                 pp->dev, pp->dev_t, mpp->alias);
1169                 }
1170         }
1171
1172 out:
1173         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1174                 vector_del_slot(vecs->pathvec, i);
1175
1176         free_path(pp);
1177
1178         return retval;
1179
1180 fail:
1181         remove_map_and_stop_waiter(mpp, vecs);
1182         return 1;
1183 }
1184
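/*
 * Handle a "change" uevent for a path device: detect WWID changes
 * (failing the path if disable_changed_wwids is set), refresh the
 * udev/sysfs information, reload the map when the event reports a new
 * read-only setting, and re-add paths that were still waiting for udev
 * initialization.
 */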
1185 static int
1186 uev_update_path (struct uevent *uev, struct vectors * vecs)
1187 {
1188         int ro, retval = 0, rc;
1189         struct path * pp;
1190         struct config *conf;
1191         int disable_changed_wwids;
1192         int needs_reinit = 0;
1193
1194         switch ((rc = change_foreign(uev->udev))) {
1195         case FOREIGN_OK:
1196                 /* known foreign path, ignore event */
1197                 return 0;
1198         case FOREIGN_IGNORED:
1199                 break;
1200         case FOREIGN_ERR:
1201                 condlog(3, "%s: error in change_foreign", __func__);
1202                 break;
1203         default:
1204                 condlog(1, "%s: unsupported return code %d from change_foreign",
1205                         __func__, rc);
1206                 break;
1207         }
1208
1209         conf = get_multipath_config();
1210         disable_changed_wwids = conf->disable_changed_wwids;
1211         put_multipath_config(conf);
1212
1213         ro = uevent_get_disk_ro(uev);
1214
1215         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1216         lock(&vecs->lock);
1217         pthread_testcancel();
1218
1219         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1220         if (pp) {
1221                 struct multipath *mpp = pp->mpp;
1222                 char wwid[WWID_SIZE];
1223
1224                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1225                         needs_reinit = 1;
1226                         goto out;
1227                 }
1228                 /* Don't deal with other types of failed initialization
1229                  * now. check_path will handle it */
1230                 if (!strlen(pp->wwid))
1231                         goto out;
1232
1233                 strcpy(wwid, pp->wwid);
1234                 get_uid(pp, pp->state, uev->udev);
1235
1236                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1237                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1238                                 uev->kernel, wwid, pp->wwid,
1239                                 (disable_changed_wwids ? "disallowing" :
1240                                  "continuing"));
1241                         strcpy(pp->wwid, wwid);
1242                         if (disable_changed_wwids) {
1243                                 if (!pp->wwid_changed) {
1244                                         pp->wwid_changed = 1;
1245                                         pp->tick = 1;
1246                                         if (pp->mpp)
1247                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1248                                 }
1249                                 goto out;
1250                         }
1251                 } else {
1252                         pp->wwid_changed = 0;
1253                         udev_device_unref(pp->udev);
1254                         pp->udev = udev_device_ref(uev->udev);
1255                         conf = get_multipath_config();
1256                         pthread_cleanup_push(put_multipath_config, conf);
1257                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1258                                 condlog(1, "%s: pathinfo failed after change uevent",
1259                                         uev->kernel);
1260                         pthread_cleanup_pop(1);
1261                 }
1262
1263                 if (mpp && ro >= 0) {
1264                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1265
1266                         if (mpp->wait_for_udev)
1267                                 mpp->wait_for_udev = 2;
1268                         else {
1269                                 if (ro == 1)
1270                                         pp->mpp->force_readonly = 1;
1271                                 retval = reload_map(vecs, mpp, 0, 1);
1272                                 pp->mpp->force_readonly = 0;
1273                                 condlog(2, "%s: map %s reloaded (retval %d)",
1274                                         uev->kernel, mpp->alias, retval);
1275                         }
1276                 }
1277         }
1278 out:
1279         lock_cleanup_pop(vecs->lock);
1280         if (!pp) {
1281                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1282                 if (uev->udev) {
1283                         int flag = DI_SYSFS | DI_WWID;
1284
1285                         conf = get_multipath_config();
1286                         pthread_cleanup_push(put_multipath_config, conf);
1287                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1288                         pthread_cleanup_pop(1);
1289
1290                         if (retval == PATHINFO_SKIPPED) {
1291                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1292                                 return 0;
1293                         }
1294                 }
1295
1296                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1297         }
1298         if (needs_reinit)
1299                 retval = uev_add_path(uev, vecs, 1);
1300         return retval;
1301 }
1302
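/*
 * Handle a dm PATH_FAILED event by handing the failed path over to the
 * io_err_stat code for path IO error accounting.
 */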
1303 static int
1304 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1305 {
1306         char *action = NULL, *devt = NULL;
1307         struct path *pp;
1308         int r = 1;
1309
1310         action = uevent_get_dm_action(uev);
1311         if (!action)
1312                 return 1;
1313         if (strncmp(action, "PATH_FAILED", 11))
1314                 goto out;
1315         devt = uevent_get_dm_path(uev);
1316         if (!devt) {
1317                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1318                 goto out;
1319         }
1320
1321         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1322         lock(&vecs->lock);
1323         pthread_testcancel();
1324         pp = find_path_by_devt(vecs->pathvec, devt);
1325         if (!pp)
1326                 goto out_lock;
1327         r = io_err_stat_handle_pathfail(pp);
1328         if (r)
1329                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1330                                 pp->dev);
1331 out_lock:
1332         lock_cleanup_pop(vecs->lock);
1333         FREE(devt);
1334         FREE(action);
1335         return r;
1336 out:
1337         FREE(action);
1338         return 1;
1339 }
1340
1341 static int
1342 map_discovery (struct vectors * vecs)
1343 {
1344         struct multipath * mpp;
1345         unsigned int i;
1346
1347         if (dm_get_maps(vecs->mpvec))
1348                 return 1;
1349
1350         vector_foreach_slot (vecs->mpvec, mpp, i)
1351                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1352                     update_multipath_status(mpp)) {
1353                         remove_map(mpp, vecs, 1);
1354                         i--;
1355                 }
1356
1357         return 0;
1358 }
1359
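/*
 * Callback for the unix socket listener: non-root clients may only run
 * "list"/"show" commands. parse_cmd() builds the reply; on failure
 * "fail" or "timeout" is returned, and "ok" when a successful command
 * produced no other output.
 */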
1360 int
1361 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1362                 void * trigger_data)
1363 {
1364         struct vectors * vecs;
1365         int r;
1366
1367         *reply = NULL;
1368         *len = 0;
1369         vecs = (struct vectors *)trigger_data;
1370
1371         if ((str != NULL) && (is_root == false) &&
1372             (strncmp(str, "list", strlen("list")) != 0) &&
1373             (strncmp(str, "show", strlen("show")) != 0)) {
1374                 *reply = STRDUP("permission denied: need to be root");
1375                 if (*reply)
1376                         *len = strlen(*reply) + 1;
1377                 return 1;
1378         }
1379
1380         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1381
1382         if (r > 0) {
1383                 if (r == ETIMEDOUT)
1384                         *reply = STRDUP("timeout\n");
1385                 else
1386                         *reply = STRDUP("fail\n");
1387                 if (*reply)
1388                         *len = strlen(*reply) + 1;
1389                 r = 1;
1390         }
1391         else if (!r && *len == 0) {
1392                 *reply = STRDUP("ok\n");
1393                 if (*reply)
1394                         *len = strlen(*reply) + 1;
1395                 r = 0;
1396         }
1397         /* else if (r < 0) leave *reply alone */
1398
1399         return r;
1400 }
1401
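/*
 * Main uevent dispatcher. Events are held back until the daemon is
 * idle or running. dm-* events are treated as map events ("change"
 * adds or updates a map, "remove" drops it from multipathd's tables);
 * everything else is treated as a path event. Merged add/remove
 * uevents are replayed first without pushing a new map; only the
 * final event triggers the actual map reload.
 */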
1402 int
1403 uev_trigger (struct uevent * uev, void * trigger_data)
1404 {
1405         int r = 0;
1406         struct vectors * vecs;
1407         struct uevent *merge_uev, *tmp;
1408
1409         vecs = (struct vectors *)trigger_data;
1410
1411         pthread_cleanup_push(config_cleanup, NULL);
1412         pthread_mutex_lock(&config_lock);
1413         if (running_state != DAEMON_IDLE &&
1414             running_state != DAEMON_RUNNING)
1415                 pthread_cond_wait(&config_cond, &config_lock);
1416         pthread_cleanup_pop(1);
1417
1418         if (running_state == DAEMON_SHUTDOWN)
1419                 return 0;
1420
1421         /*
1422          * device map event
1423          * Add events are ignored here as the tables
1424          * are not fully initialised then.
1425          */
1426         if (!strncmp(uev->kernel, "dm-", 3)) {
1427                 if (!uevent_is_mpath(uev)) {
1428                         if (!strncmp(uev->action, "change", 6))
1429                                 (void)add_foreign(uev->udev);
1430                         else if (!strncmp(uev->action, "remove", 6))
1431                                 (void)delete_foreign(uev->udev);
1432                         goto out;
1433                 }
1434                 if (!strncmp(uev->action, "change", 6)) {
1435                         r = uev_add_map(uev, vecs);
1436
1437                         /*
1438                          * the kernel-side dm-mpath issues a PATH_FAILED event
1439                          * when it encounters a path IO error. That event is a
1440                          * reasonable entry point for the path IO error accounting
1441                          * process.
1442                          */
1443                         uev_pathfail_check(uev, vecs);
1444                 } else if (!strncmp(uev->action, "remove", 6)) {
1445                         r = uev_remove_map(uev, vecs);
1446                 }
1447                 goto out;
1448         }
1449
1450         /*
1451          * path add/remove/change event, add/remove maybe merged
1452          */
1453         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1454                 if (!strncmp(merge_uev->action, "add", 3))
1455                         r += uev_add_path(merge_uev, vecs, 0);
1456                 if (!strncmp(merge_uev->action, "remove", 6))
1457                         r += uev_remove_path(merge_uev, vecs, 0);
1458         }
1459
1460         if (!strncmp(uev->action, "add", 3))
1461                 r += uev_add_path(uev, vecs, 1);
1462         if (!strncmp(uev->action, "remove", 6))
1463                 r += uev_remove_path(uev, vecs, 1);
1464         if (!strncmp(uev->action, "change", 6))
1465                 r += uev_update_path(uev, vecs);
1466
1467 out:
1468         return r;
1469 }
1470
1471 static void rcu_unregister(void *param)
1472 {
1473         rcu_unregister_thread();
1474 }
1475
1476 static void *
1477 ueventloop (void * ap)
1478 {
1479         struct udev *udev = ap;
1480
1481         pthread_cleanup_push(rcu_unregister, NULL);
1482         rcu_register_thread();
1483         if (uevent_listen(udev))
1484                 condlog(0, "error starting uevent listener");
1485         pthread_cleanup_pop(1);
1486         return NULL;
1487 }
1488
1489 static void *
1490 uevqloop (void * ap)
1491 {
1492         pthread_cleanup_push(rcu_unregister, NULL);
1493         rcu_register_thread();
1494         if (uevent_dispatch(&uev_trigger, ap))
1495                 condlog(0, "error starting uevent dispatcher");
1496         pthread_cleanup_pop(1);
1497         return NULL;
1498 }
1499 static void *
1500 uxlsnrloop (void * ap)
1501 {
1502         long ux_sock;
1503
1504         pthread_cleanup_push(rcu_unregister, NULL);
1505         rcu_register_thread();
1506
1507         ux_sock = ux_socket_listen(DEFAULT_SOCKET);
1508         if (ux_sock == -1) {
1509                 condlog(1, "could not create uxsock: %d", errno);
1510                 exit_daemon();
1511                 goto out;
1512         }
1513         pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);
1514
1515         if (cli_init()) {
1516                 condlog(1, "Failed to init uxsock listener");
1517                 exit_daemon();
1518                 goto out_sock;
1519         }
1520
1521         /* Tell main thread that thread has started */
1522         post_config_state(DAEMON_CONFIGURE);
1523
1524         set_handler_callback(LIST+PATHS, cli_list_paths);
1525         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1526         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1527         set_handler_callback(LIST+PATH, cli_list_path);
1528         set_handler_callback(LIST+MAPS, cli_list_maps);
1529         set_handler_callback(LIST+STATUS, cli_list_status);
1530         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1531         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1532         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1533         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1534         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1535         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1536         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1537         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1538         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1539         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1540         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1541         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1542         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1543         set_handler_callback(LIST+CONFIG, cli_list_config);
1544         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1545         set_handler_callback(LIST+DEVICES, cli_list_devices);
1546         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1547         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1548         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1549         set_handler_callback(ADD+PATH, cli_add_path);
1550         set_handler_callback(DEL+PATH, cli_del_path);
1551         set_handler_callback(ADD+MAP, cli_add_map);
1552         set_handler_callback(DEL+MAP, cli_del_map);
1553         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1554         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1555         set_handler_callback(SUSPEND+MAP, cli_suspend);
1556         set_handler_callback(RESUME+MAP, cli_resume);
1557         set_handler_callback(RESIZE+MAP, cli_resize);
1558         set_handler_callback(RELOAD+MAP, cli_reload);
1559         set_handler_callback(RESET+MAP, cli_reassign);
1560         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1561         set_handler_callback(FAIL+PATH, cli_fail);
1562         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1563         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1564         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1565         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1566         set_unlocked_handler_callback(QUIT, cli_quit);
1567         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1568         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1569         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1570         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1571         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1572         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1573         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1574         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1575         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1576
1577         umask(077);
1578         uxsock_listen(&uxsock_trigger, ux_sock, ap);
1579
1580 out_sock:
1581         pthread_cleanup_pop(1); /* uxsock_cleanup */
1582 out:
1583         pthread_cleanup_pop(1); /* rcu_unregister */
1584         return NULL;
1585 }
1586
1587 void
1588 exit_daemon (void)
1589 {
1590         post_config_state(DAEMON_SHUTDOWN);
1591 }
1592
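     /*
      * Mark a path as failed in the device-mapper table; if
      * del_active is set, also update the map's queueing mode to
      * account for the lost active path.
      */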
1593 static void
1594 fail_path (struct path * pp, int del_active)
1595 {
1596         if (!pp->mpp)
1597                 return;
1598
1599         condlog(2, "checker failed path %s in map %s",
1600                  pp->dev_t, pp->mpp->alias);
1601
1602         dm_fail_path(pp->mpp->alias, pp->dev_t);
1603         if (del_active)
1604                 update_queue_mode_del_path(pp->mpp);
1605 }
1606
1607 /*
1608  * the caller must have locked the path list before calling this function
1609  */
1610 static int
1611 reinstate_path (struct path * pp, int add_active)
1612 {
1613         int ret = 0;
1614
1615         if (!pp->mpp)
1616                 return 0;
1617
1618         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1619                 condlog(0, "%s: reinstate failed", pp->dev_t);
1620                 ret = 1;
1621         } else {
1622                 condlog(2, "%s: reinstated", pp->dev_t);
1623                 if (add_active)
1624                         update_queue_mode_add_path(pp->mpp);
1625         }
1626         return ret;
1627 }
1628
1629 static void
1630 enable_group(struct path * pp)
1631 {
1632         struct pathgroup * pgp;
1633
1634         /*
1635          * if the path was added through uev_add_path, pgindex can be unset.
1636          * the next update_strings() call will set it upon the map reload event.
1637          *
1638          * we can safely return here, because upon map reload all
1639          * PGs will be enabled.
1640          */
1641         if (!pp->mpp->pg || !pp->pgindex)
1642                 return;
1643
1644         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1645
1646         if (pgp->status == PGSTATE_DISABLED) {
1647                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1648                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1649         }
1650 }
1651
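     /*
      * Remove multipath entries whose device-mapper map has
      * disappeared, stopping their waiter threads.
      */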
1652 static void
1653 mpvec_garbage_collector (struct vectors * vecs)
1654 {
1655         struct multipath * mpp;
1656         unsigned int i;
1657
1658         if (!vecs->mpvec)
1659                 return;
1660
1661         vector_foreach_slot (vecs->mpvec, mpp, i) {
1662                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1663                         condlog(2, "%s: remove dead map", mpp->alias);
1664                         remove_map_and_stop_waiter(mpp, vecs);
1665                         i--;
1666                 }
1667         }
1668 }
1669
1670 /* This is called after a path has started working again. If the multipath
1671  * device for this path uses the followover failback type, and this is the
1672  * best pathgroup, and this is the first path in the pathgroup to come back
1673  * up, then switch to this pathgroup */
1674 static int
1675 followover_should_failback(struct path * pp)
1676 {
1677         struct pathgroup * pgp;
1678         struct path *pp1;
1679         int i;
1680
1681         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1682             !pp->mpp->pg || !pp->pgindex ||
1683             pp->pgindex != pp->mpp->bestpg)
1684                 return 0;
1685
1686         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1687         vector_foreach_slot(pgp->paths, pp1, i) {
1688                 if (pp1 == pp)
1689                         continue;
1690                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1691                         return 0;
1692         }
1693         return 1;
1694 }
1695
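     /*
      * Count down the timers of maps still waiting for their creation
      * uevent; on timeout, re-enable reloads and trigger a delayed
      * reconfigure if one was requested.
      */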
1696 static void
1697 missing_uev_wait_tick(struct vectors *vecs)
1698 {
1699         struct multipath * mpp;
1700         unsigned int i;
1701         int timed_out = 0, delayed_reconfig;
1702         struct config *conf;
1703
1704         vector_foreach_slot (vecs->mpvec, mpp, i) {
1705                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1706                         timed_out = 1;
1707                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1708                         if (mpp->wait_for_udev > 1 &&
1709                             update_map(mpp, vecs, 0)) {
1710                                 /* update_map removed map */
1711                                 i--;
1712                                 continue;
1713                         }
1714                         mpp->wait_for_udev = 0;
1715                 }
1716         }
1717
1718         conf = get_multipath_config();
1719         delayed_reconfig = conf->delayed_reconfig;
1720         put_multipath_config(conf);
1721         if (timed_out && delayed_reconfig &&
1722             !need_to_delay_reconfig(vecs)) {
1723                 condlog(2, "reconfigure (delayed)");
1724                 set_config_state(DAEMON_CONFIGURE);
1725         }
1726 }
1727
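     /*
      * Count down the ghost delay timers; when a map has waited too
      * long for an active path, force a udev reload of the map.
      */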
1728 static void
1729 ghost_delay_tick(struct vectors *vecs)
1730 {
1731         struct multipath * mpp;
1732         unsigned int i;
1733
1734         vector_foreach_slot (vecs->mpvec, mpp, i) {
1735                 if (mpp->ghost_delay_tick <= 0)
1736                         continue;
1737                 if (--mpp->ghost_delay_tick <= 0) {
1738                         condlog(0, "%s: timed out waiting for active path",
1739                                 mpp->alias);
1740                         mpp->force_udev_reload = 1;
1741                         if (update_map(mpp, vecs, 0) != 0) {
1742                                 /* update_map removed map */
1743                                 i--;
1744                                 continue;
1745                         }
1746                 }
1747         }
1748 }
1749
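     /*
      * Count down the deferred failback timers and switch the path
      * group once a timer expires and a better group is available.
      */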
1750 static void
1751 defered_failback_tick (vector mpvec)
1752 {
1753         struct multipath * mpp;
1754         unsigned int i;
1755
1756         vector_foreach_slot (mpvec, mpp, i) {
1757                 /*
1758                  * the deferred failback is getting closer
1759                  */
1760                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1761                         mpp->failback_tick--;
1762
1763                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1764                                 switch_pathgroup(mpp);
1765                 }
1766         }
1767 }
1768
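     /*
      * Count down the retry timers of maps queueing without an active
      * path; once a timer expires, disable queue_if_no_path for the
      * map.
      */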
1769 static void
1770 retry_count_tick(vector mpvec)
1771 {
1772         struct multipath *mpp;
1773         unsigned int i;
1774
1775         vector_foreach_slot (mpvec, mpp, i) {
1776                 if (mpp->retry_tick > 0) {
1777                         mpp->stat_total_queueing_time++;
1778                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1779                         if(--mpp->retry_tick == 0) {
1780                                 mpp->stat_map_failures++;
1781                                 dm_queue_if_no_path(mpp->alias, 0);
1782                                 condlog(2, "%s: Disable queueing", mpp->alias);
1783                         }
1784                 }
1785         }
1786 }
1787
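     /*
      * Refresh the priority of a path, or of all paths in its map if
      * refresh_all is set. Returns 1 if any priority changed, 0
      * otherwise.
      */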
1788 int update_prio(struct path *pp, int refresh_all)
1789 {
1790         int oldpriority;
1791         struct path *pp1;
1792         struct pathgroup * pgp;
1793         int i, j, changed = 0;
1794         struct config *conf;
1795
1796         if (refresh_all) {
1797                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1798                         vector_foreach_slot (pgp->paths, pp1, j) {
1799                                 oldpriority = pp1->priority;
1800                                 conf = get_multipath_config();
1801                                 pthread_cleanup_push(put_multipath_config,
1802                                                      conf);
1803                                 pathinfo(pp1, conf, DI_PRIO);
1804                                 pthread_cleanup_pop(1);
1805                                 if (pp1->priority != oldpriority)
1806                                         changed = 1;
1807                         }
1808                 }
1809                 return changed;
1810         }
1811         oldpriority = pp->priority;
1812         conf = get_multipath_config();
1813         pthread_cleanup_push(put_multipath_config, conf);
1814         if (pp->state != PATH_DOWN)
1815                 pathinfo(pp, conf, DI_PRIO);
1816         pthread_cleanup_pop(1);
1817
1818         if (pp->priority == oldpriority)
1819                 return 0;
1820         return 1;
1821 }
1822
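     /*
      * Reload the multipath map and resynchronize the daemon's view
      * of it with the kernel state.
      */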
1823 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1824 {
1825         if (reload_map(vecs, mpp, refresh, 1))
1826                 return 1;
1827
1828         dm_lib_release();
1829         if (setup_multipath(vecs, mpp) != 0)
1830                 return 1;
1831         sync_map_state(mpp);
1832
1833         return 0;
1834 }
1835
1836 /*
1837  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1838  * and '0' otherwise
1839  */
1840 int
1841 check_path (struct vectors * vecs, struct path * pp, int ticks)
1842 {
1843         int newstate;
1844         int new_path_up = 0;
1845         int chkr_new_path_up = 0;
1846         int add_active;
1847         int disable_reinstate = 0;
1848         int oldchkrstate = pp->chkrstate;
1849         int retrigger_tries, checkint, max_checkint, verbosity;
1850         struct config *conf;
1851         int ret;
1852
1853         if ((pp->initialized == INIT_OK ||
1854              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1855                 return 0;
1856
1857         if (pp->tick)
1858                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1859         if (pp->tick)
1860                 return 0; /* don't check this path yet */
1861
1862         conf = get_multipath_config();
1863         retrigger_tries = conf->retrigger_tries;
1864         checkint = conf->checkint;
1865         max_checkint = conf->max_checkint;
1866         verbosity = conf->verbosity;
1867         put_multipath_config(conf);
1868
1869         if (pp->checkint == CHECKINT_UNDEF) {
1870                 condlog(0, "%s: BUG: checkint is not set", pp->dev);
1871                 pp->checkint = checkint;
1872         }
1873
1874         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1875                 if (pp->retriggers < retrigger_tries) {
1876                         condlog(2, "%s: triggering change event to reinitialize",
1877                                 pp->dev);
1878                         pp->initialized = INIT_REQUESTED_UDEV;
1879                         pp->retriggers++;
1880                         sysfs_attr_set_value(pp->udev, "uevent", "change",
1881                                              strlen("change"));
1882                         return 0;
1883                 } else {
1884                         condlog(1, "%s: not initialized after %d udev retriggers",
1885                                 pp->dev, retrigger_tries);
1886                         /*
1887                          * Make sure that the "add missing path" code path
1888                          * below may reinstate the path later, if it ever
1889                          * comes up again.
1890                          * The WWID need not be cleared; if it was set, the
1891                          * state hadn't been INIT_MISSING_UDEV in the first
1892                          * place.
1893                          */
1894                         pp->initialized = INIT_FAILED;
1895                         return 0;
1896                 }
1897         }
1898
1899         /*
1900          * schedule the next check as soon as possible,
1901          * in case we exit abnormally from here
1902          */
1903         pp->tick = checkint;
1904
1905         newstate = path_offline(pp);
1906         if (newstate == PATH_UP) {
1907                 conf = get_multipath_config();
1908                 pthread_cleanup_push(put_multipath_config, conf);
1909                 newstate = get_state(pp, conf, 1, newstate);
1910                 pthread_cleanup_pop(1);
1911         } else {
1912                 checker_clear_message(&pp->checker);
1913                 condlog(3, "%s: state %s, checker not called",
1914                         pp->dev, checker_state_name(newstate));
1915         }
1916         /*
1917          * Wait for uevent for removed paths;
1918          * some LLDDs like zfcp keep paths unavailable
1919          * without sending uevents.
1920          */
1921         if (newstate == PATH_REMOVED)
1922                 newstate = PATH_DOWN;
1923
1924         if (pp->wwid_changed) {
1925                 condlog(2, "%s: path wwid has changed. Refusing to use",
1926                         pp->dev);
1927                 newstate = PATH_DOWN;
1928         }
1929
1930         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1931                 condlog(2, "%s: unusable path - checker failed", pp->dev);
1932                 LOG_MSG(2, verbosity, pp);
1933                 conf = get_multipath_config();
1934                 pthread_cleanup_push(put_multipath_config, conf);
1935                 pathinfo(pp, conf, 0);
1936                 pthread_cleanup_pop(1);
1937                 return 1;
1938         }
1939         if (!pp->mpp) {
1940                 if (!strlen(pp->wwid) && pp->initialized == INIT_FAILED &&
1941                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1942                         condlog(2, "%s: add missing path", pp->dev);
1943                         conf = get_multipath_config();
1944                         pthread_cleanup_push(put_multipath_config, conf);
1945                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1946                         pthread_cleanup_pop(1);
1947                         /* INIT_OK implies ret == PATHINFO_OK */
1948                         if (pp->initialized == INIT_OK) {
1949                                 ev_add_path(pp, vecs, 1);
1950                                 pp->tick = 1;
1951                         } else {
1952                                 /*
1953                                  * We failed multiple times to initialize this
1954                                  * path properly. Don't re-check too often.
1955                                  */
1956                                 pp->checkint = max_checkint;
1957                                 if (ret == PATHINFO_SKIPPED)
1958                                         return -1;
1959                         }
1960                 }
1961                 return 0;
1962         }
1963         /*
1964          * Async IO in flight. Keep the previous path state
1965          * and reschedule as soon as possible
1966          */
1967         if (newstate == PATH_PENDING) {
1968                 pp->tick = 1;
1969                 return 0;
1970         }
1971         /*
1972          * Synchronize with kernel state
1973          */
1974         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1975                 condlog(1, "%s: Could not synchronize with kernel state",
1976                         pp->dev);
1977                 pp->dmstate = PSTATE_UNDEF;
1978         }
1979         /* if update_multipath_strings orphaned the path, quit early */
1980         if (!pp->mpp)
1981                 return 0;
1982
1983         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1984                 pp->state = PATH_SHAKY;
1985                 /*
1986                  * to reschedule as soon as possible, so that this path can
1987                  * be recovered in time
1988                  */
1989                 pp->tick = 1;
1990                 return 1;
1991         }
1992
1993         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1994              pp->wait_checks > 0) {
1995                 if (pp->mpp->nr_active > 0) {
1996                         pp->state = PATH_DELAYED;
1997                         pp->wait_checks--;
1998                         return 1;
1999                 } else
2000                         pp->wait_checks = 0;
2001         }
2002
2003         /*
2004          * don't reinstate a failed path if it is in stand-by
2005          * and the target supports only implicit TPGS mode.
2006          * this will prevent unnecessary I/O by DM on stand-by
2007          * paths if there are no other active paths in the map.
2008          */
2009         disable_reinstate = (newstate == PATH_GHOST &&
2010                             pp->mpp->nr_active == 0 &&
2011                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
2012
2013         pp->chkrstate = newstate;
2014         if (newstate != pp->state) {
2015                 int oldstate = pp->state;
2016                 pp->state = newstate;
2017
2018                 LOG_MSG(1, verbosity, pp);
2019
2020                 /*
2021                  * upon state change, reset the checkint
2022                  * to the shortest delay
2023                  */
2024                 conf = get_multipath_config();
2025                 pp->checkint = conf->checkint;
2026                 put_multipath_config(conf);
2027
2028                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2029                         /*
2030                          * proactively fail path in the DM
2031                          */
2032                         if (oldstate == PATH_UP ||
2033                             oldstate == PATH_GHOST) {
2034                                 fail_path(pp, 1);
2035                                 if (pp->mpp->delay_wait_checks > 0 &&
2036                                     pp->watch_checks > 0) {
2037                                         pp->wait_checks = pp->mpp->delay_wait_checks;
2038                                         pp->watch_checks = 0;
2039                                 }
2040                         } else {
2041                                 fail_path(pp, 0);
2042                                 if (pp->wait_checks > 0)
2043                                         pp->wait_checks =
2044                                                 pp->mpp->delay_wait_checks;
2045                         }
2046
2047                         /*
2048                          * cancel scheduled failback
2049                          */
2050                         pp->mpp->failback_tick = 0;
2051
2052                         pp->mpp->stat_path_failures++;
2053                         return 1;
2054                 }
2055
2056                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2057                         if (pp->mpp->prflag) {
2058                                 /*
2059                                  * Check Persistent Reservation.
2060                                  */
2061                                 condlog(2, "%s: checking persistent "
2062                                         "reservation registration", pp->dev);
2063                                 mpath_pr_event_handle(pp);
2064                         }
2065                 }
2066
2067                 /*
2068                  * reinstate this path
2069                  */
2070                 if (oldstate != PATH_UP &&
2071                     oldstate != PATH_GHOST) {
2072                         if (pp->mpp->delay_watch_checks > 0)
2073                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2074                         add_active = 1;
2075                 } else {
2076                         if (pp->watch_checks > 0)
2077                                 pp->watch_checks--;
2078                         add_active = 0;
2079                 }
2080                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2081                         condlog(3, "%s: reload map", pp->dev);
2082                         ev_add_path(pp, vecs, 1);
2083                         pp->tick = 1;
2084                         return 0;
2085                 }
2086                 new_path_up = 1;
2087
2088                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2089                         chkr_new_path_up = 1;
2090
2091                 /*
2092                  * if at least one path is up in a group, and
2093                  * the group is disabled, re-enable it
2094                  */
2095                 if (newstate == PATH_UP)
2096                         enable_group(pp);
2097         }
2098         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2099                 if ((pp->dmstate == PSTATE_FAILED ||
2100                     pp->dmstate == PSTATE_UNDEF) &&
2101                     !disable_reinstate) {
2102                         /* Clear IO errors */
2103                         if (reinstate_path(pp, 0)) {
2104                                 condlog(3, "%s: reload map", pp->dev);
2105                                 ev_add_path(pp, vecs, 1);
2106                                 pp->tick = 1;
2107                                 return 0;
2108                         }
2109                 } else {
2110                         LOG_MSG(4, verbosity, pp);
2111                         if (pp->checkint != max_checkint) {
2112                                 /*
2113                                  * double the next check delay.
2114                                  * max at conf->max_checkint
2115                                  */
2116                                 if (pp->checkint < (max_checkint / 2))
2117                                         pp->checkint = 2 * pp->checkint;
2118                                 else
2119                                         pp->checkint = max_checkint;
2120
2121                                 condlog(4, "%s: delay next check %is",
2122                                         pp->dev_t, pp->checkint);
2123                         }
2124                         if (pp->watch_checks > 0)
2125                                 pp->watch_checks--;
2126                         pp->tick = pp->checkint;
2127                 }
2128         }
2129         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2130                 if (pp->dmstate == PSTATE_ACTIVE ||
2131                     pp->dmstate == PSTATE_UNDEF)
2132                         fail_path(pp, 0);
2133                 if (newstate == PATH_DOWN) {
2134                         int log_checker_err;
2135
2136                         conf = get_multipath_config();
2137                         log_checker_err = conf->log_checker_err;
2138                         put_multipath_config(conf);
2139                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2140                                 LOG_MSG(3, verbosity, pp);
2141                         else
2142                                 LOG_MSG(2, verbosity, pp);
2143                 }
2144         }
2145
2146         pp->state = newstate;
2147
2148         if (pp->mpp->wait_for_udev)
2149                 return 1;
2150         /*
2151          * path prio refreshing
2152          */
2153         condlog(4, "path prio refresh");
2154
2155         if (update_prio(pp, new_path_up) &&
2156             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2157              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2158                 update_path_groups(pp->mpp, vecs, !new_path_up);
2159         else if (need_switch_pathgroup(pp->mpp, 0)) {
2160                 if (pp->mpp->pgfailback > 0 &&
2161                     (new_path_up || pp->mpp->failback_tick <= 0))
2162                         pp->mpp->failback_tick =
2163                                 pp->mpp->pgfailback + 1;
2164                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2165                          (chkr_new_path_up && followover_should_failback(pp)))
2166                         switch_pathgroup(pp->mpp);
2167         }
2168         return 1;
2169 }
2170
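     /*
      * Path checker thread: wakes up roughly once per second, runs
      * check_path() on every path, handles the periodic per-map
      * timers (deferred failback, retries, uevent waits, ghost delay)
      * and the map garbage collector, then returns to DAEMON_IDLE.
      */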
2171 static void *
2172 checkerloop (void *ap)
2173 {
2174         struct vectors *vecs;
2175         struct path *pp;
2176         int count = 0;
2177         unsigned int i;
2178         struct timespec last_time;
2179         struct config *conf;
2180
2181         pthread_cleanup_push(rcu_unregister, NULL);
2182         rcu_register_thread();
2183         mlockall(MCL_CURRENT | MCL_FUTURE);
2184         vecs = (struct vectors *)ap;
2185         condlog(2, "path checkers start up");
2186
2187         /* Tweak start time for initial path check */
2188         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2189                 last_time.tv_sec = 0;
2190         else
2191                 last_time.tv_sec -= 1;
2192
2193         while (1) {
2194                 struct timespec diff_time, start_time, end_time;
2195                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2196
2197                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2198                         start_time.tv_sec = 0;
2199                 if (start_time.tv_sec && last_time.tv_sec) {
2200                         timespecsub(&start_time, &last_time, &diff_time);
2201                         condlog(4, "tick (%lu.%06lu secs)",
2202                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2203                         last_time = start_time;
2204                         ticks = diff_time.tv_sec;
2205                 } else {
2206                         ticks = 1;
2207                         condlog(4, "tick (%d ticks)", ticks);
2208                 }
2209 #ifdef USE_SYSTEMD
2210                 if (use_watchdog)
2211                         sd_notify(0, "WATCHDOG=1");
2212 #endif
2213                 rc = set_config_state(DAEMON_RUNNING);
2214                 if (rc == ETIMEDOUT) {
2215                         condlog(4, "timeout waiting for DAEMON_IDLE");
2216                         continue;
2217                 } else if (rc == EINVAL)
2218                         /* daemon shutdown */
2219                         break;
2220
2221                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2222                 lock(&vecs->lock);
2223                 pthread_testcancel();
2224                 vector_foreach_slot (vecs->pathvec, pp, i) {
2225                         rc = check_path(vecs, pp, ticks);
2226                         if (rc < 0) {
2227                                 vector_del_slot(vecs->pathvec, i);
2228                                 free_path(pp);
2229                                 i--;
2230                         } else
2231                                 num_paths += rc;
2232                 }
2233                 lock_cleanup_pop(vecs->lock);
2234
2235                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2236                 lock(&vecs->lock);
2237                 pthread_testcancel();
2238                 defered_failback_tick(vecs->mpvec);
2239                 retry_count_tick(vecs->mpvec);
2240                 missing_uev_wait_tick(vecs);
2241                 ghost_delay_tick(vecs);
2242                 lock_cleanup_pop(vecs->lock);
2243
2244                 if (count)
2245                         count--;
2246                 else {
2247                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2248                         lock(&vecs->lock);
2249                         pthread_testcancel();
2250                         condlog(4, "map garbage collection");
2251                         mpvec_garbage_collector(vecs);
2252                         count = MAPGCINT;
2253                         lock_cleanup_pop(vecs->lock);
2254                 }
2255
2256                 diff_time.tv_nsec = 0;
2257                 if (start_time.tv_sec &&
2258                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2259                         timespecsub(&end_time, &start_time, &diff_time);
2260                         if (num_paths) {
2261                                 unsigned int max_checkint;
2262
2263                                 condlog(4, "checked %d path%s in %lu.%06lu secs",
2264                                         num_paths, num_paths > 1 ? "s" : "",
2265                                         diff_time.tv_sec,
2266                                         diff_time.tv_nsec / 1000);
2267                                 conf = get_multipath_config();
2268                                 max_checkint = conf->max_checkint;
2269                                 put_multipath_config(conf);
2270                                 if (diff_time.tv_sec > max_checkint)
2271                                         condlog(1, "path checkers took longer "
2272                                                 "than %lu seconds, consider "
2273                                                 "increasing max_polling_interval",
2274                                                 diff_time.tv_sec);
2275                         }
2276                 }
2277                 check_foreign();
2278                 post_config_state(DAEMON_IDLE);
2279                 conf = get_multipath_config();
2280                 strict_timing = conf->strict_timing;
2281                 put_multipath_config(conf);
2282                 if (!strict_timing)
2283                         sleep(1);
2284                 else {
2285                         if (diff_time.tv_nsec) {
2286                                 diff_time.tv_sec = 0;
2287                                 diff_time.tv_nsec =
2288                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2289                         } else
2290                                 diff_time.tv_sec = 1;
2291
2292                         condlog(3, "waiting for %lu.%06lu secs",
2293                                 diff_time.tv_sec,
2294                                 diff_time.tv_nsec / 1000);
2295                         if (nanosleep(&diff_time, NULL) != 0) {
2296                                 condlog(3, "nanosleep failed with error %d",
2297                                         errno);
2298                                 conf = get_multipath_config();
2299                                 conf->strict_timing = 0;
2300                                 put_multipath_config(conf);
2301                                 break;
2302                         }
2303                 }
2304         }
2305         pthread_cleanup_pop(1);
2306         return NULL;
2307 }
2308
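     /*
      * Discover paths and maps, filter out blacklisted paths,
      * coalesce the remaining paths into multipath maps, push changed
      * maps into device-mapper and start event waiter threads for the
      * resulting maps.
      */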
2309 int
2310 configure (struct vectors * vecs)
2311 {
2312         struct multipath * mpp;
2313         struct path * pp;
2314         vector mpvec;
2315         int i, ret;
2316         struct config *conf;
2317         static int force_reload = FORCE_RELOAD_WEAK;
2318
2319         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2320                 condlog(0, "couldn't allocate path vec in configure");
2321                 return 1;
2322         }
2323
2324         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2325                 condlog(0, "couldn't allocate multipath vec in configure");
2326                 return 1;
2327         }
2328
2329         if (!(mpvec = vector_alloc())) {
2330                 condlog(0, "couldn't allocate new maps vec in configure");
2331                 return 1;
2332         }
2333
2334         /*
2335          * probe for current path (from sysfs) and map (from dm) sets
2336          */
2337         ret = path_discovery(vecs->pathvec, DI_ALL);
2338         if (ret < 0) {
2339                 condlog(0, "configure failed at path discovery");
2340                 goto fail;
2341         }
2342
2343         conf = get_multipath_config();
2344         pthread_cleanup_push(put_multipath_config, conf);
2345         vector_foreach_slot (vecs->pathvec, pp, i){
2346                 if (filter_path(conf, pp) > 0){
2347                         vector_del_slot(vecs->pathvec, i);
2348                         free_path(pp);
2349                         i--;
2350                 }
2351         }
2352         pthread_cleanup_pop(1);
2353
2354         if (map_discovery(vecs)) {
2355                 condlog(0, "configure failed at map discovery");
2356                 goto fail;
2357         }
2358
2359         /*
2360          * create new set of maps & push changed ones into dm
2361          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2362          * superfluous ACT_RELOAD ioctls. Later calls are done
2363          * with FORCE_RELOAD_YES.
2364          */
2365         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2366         if (force_reload == FORCE_RELOAD_WEAK)
2367                 force_reload = FORCE_RELOAD_YES;
2368         if (ret != CP_OK) {
2369                 condlog(0, "configure failed while coalescing paths");
2370                 goto fail;
2371         }
2372
2373         /*
2374          * may need to remove some maps which are no longer relevant
2375          * e.g., due to blacklist changes in conf file
2376          */
2377         if (coalesce_maps(vecs, mpvec)) {
2378                 condlog(0, "configure failed while coalescing maps");
2379                 goto fail;
2380         }
2381
2382         dm_lib_release();
2383
2384         sync_maps_state(mpvec);
2385         vector_foreach_slot(mpvec, mpp, i){
2386                 if (remember_wwid(mpp->wwid) == 1)
2387                         trigger_paths_udev_change(mpp, true);
2388                 update_map_pr(mpp);
2389         }
2390
2391         /*
2392          * purge dm of old maps
2393          */
2394         remove_maps(vecs);
2395
2396         /*
2397          * save new set of maps formed by considering current path state
2398          */
2399         vector_free(vecs->mpvec);
2400         vecs->mpvec = mpvec;
2401
2402         /*
2403          * start dm event waiter threads for these new maps
2404          */
2405         vector_foreach_slot(vecs->mpvec, mpp, i) {
2406                 if (wait_for_events(mpp, vecs)) {
2407                         remove_map(mpp, vecs, 1);
2408                         i--;
2409                         continue;
2410                 }
2411                 if (setup_multipath(vecs, mpp))
2412                         i--;
2413         }
2414         return 0;
2415
2416 fail:
2417         vector_free(mpvec);
2418         return 1;
2419 }
2420
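     /*
      * A reconfigure has to be delayed as long as any map is still
      * waiting for its creation uevent.
      */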
2421 int
2422 need_to_delay_reconfig(struct vectors * vecs)
2423 {
2424         struct multipath *mpp;
2425         int i;
2426
2427         if (!VECTOR_SIZE(vecs->mpvec))
2428                 return 0;
2429
2430         vector_foreach_slot(vecs->mpvec, mpp, i) {
2431                 if (mpp->wait_for_udev)
2432                         return 1;
2433         }
2434         return 0;
2435 }
2436
2437 void rcu_free_config(struct rcu_head *head)
2438 {
2439         struct config *conf = container_of(head, struct config, rcu);
2440
2441         free_config(conf);
2442 }
2443
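     /*
      * Load a fresh configuration, drop the old map and path vectors,
      * publish the new configuration via RCU and rebuild everything
      * with configure().
      */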
2444 int
2445 reconfigure (struct vectors * vecs)
2446 {
2447         struct config * old, *conf;
2448
2449         conf = load_config(DEFAULT_CONFIGFILE);
2450         if (!conf)
2451                 return 1;
2452
2453         /*
2454          * free old map and path vectors ... they use old conf state
2455          */
2456         if (VECTOR_SIZE(vecs->mpvec))
2457                 remove_maps_and_stop_waiters(vecs);
2458
2459         free_pathvec(vecs->pathvec, FREE_PATHS);
2460         vecs->pathvec = NULL;
2461         delete_all_foreign();
2462
2463         /* Re-read any timezone changes */
2464         tzset();
2465
2466         dm_tgt_version(conf->version, TGT_MPATH);
2467         if (verbosity)
2468                 conf->verbosity = verbosity;
2469         if (bindings_read_only)
2470                 conf->bindings_read_only = bindings_read_only;
2471         uxsock_timeout = conf->uxsock_timeout;
2472
2473         old = rcu_dereference(multipath_conf);
2474         rcu_assign_pointer(multipath_conf, conf);
2475         call_rcu(&old->rcu, rcu_free_config);
2476
2477         configure(vecs);
2478
2479
2480         return 0;
2481 }
2482
2483 static struct vectors *
2484 init_vecs (void)
2485 {
2486         struct vectors * vecs;
2487
2488         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2489
2490         if (!vecs)
2491                 return NULL;
2492
2493         pthread_mutex_init(&vecs->lock.mutex, NULL);
2494
2495         return vecs;
2496 }
2497
2498 static void *
2499 signal_set(int signo, void (*func) (int))
2500 {
2501         int r;
2502         struct sigaction sig;
2503         struct sigaction osig;
2504
2505         sig.sa_handler = func;
2506         sigemptyset(&sig.sa_mask);
2507         sig.sa_flags = 0;
2508
2509         r = sigaction(signo, &sig, &osig);
2510
2511         if (r < 0)
2512                 return (SIG_ERR);
2513         else
2514                 return (osig.sa_handler);
2515 }
2516
2517 void
2518 handle_signals(bool nonfatal)
2519 {
2520         if (exit_sig) {
2521                 condlog(2, "exit (signal)");
2522                 exit_sig = 0;
2523                 exit_daemon();
2524         }
2525         if (!nonfatal)
2526                 return;
2527         if (reconfig_sig) {
2528                 condlog(2, "reconfigure (signal)");
2529                 set_config_state(DAEMON_CONFIGURE);
2530         }
2531         if (log_reset_sig) {
2532                 condlog(2, "reset log (signal)");
2533                 if (logsink == 1)
2534                         log_thread_reset();
2535         }
2536         reconfig_sig = 0;
2537         log_reset_sig = 0;
2538 }
2539
2540 static void
2541 sighup (int sig)
2542 {
2543         reconfig_sig = 1;
2544 }
2545
2546 static void
2547 sigend (int sig)
2548 {
2549         exit_sig = 1;
2550 }
2551
2552 static void
2553 sigusr1 (int sig)
2554 {
2555         log_reset_sig = 1;
2556 }
2557
2558 static void
2559 sigusr2 (int sig)
2560 {
2561         condlog(3, "SIGUSR2 received");
2562 }
2563
2564 static void
2565 signal_init(void)
2566 {
2567         sigset_t set;
2568
2569         /* block all signals */
2570         sigfillset(&set);
2571         /* SIGPIPE occurs if logging fails */
2572         sigdelset(&set, SIGPIPE);
2573         pthread_sigmask(SIG_SETMASK, &set, NULL);
2574
2575         /* Other signals will be unblocked in the uxlsnr thread */
2576         signal_set(SIGHUP, sighup);
2577         signal_set(SIGUSR1, sigusr1);
2578         signal_set(SIGUSR2, sigusr2);
2579         signal_set(SIGINT, sigend);
2580         signal_set(SIGTERM, sigend);
2581         signal_set(SIGPIPE, sigend);
2582 }
2583
2584 static void
2585 setscheduler (void)
2586 {
2587         int res;
2588         static struct sched_param sched_param = {
2589                 .sched_priority = 99
2590         };
2591
2592         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2593
2594         if (res == -1)
2595                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2596         return;
2597 }
2598
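     /*
      * Make the daemon as unattractive as possible to the OOM killer
      * unless systemd already set OOMScoreAdjust, falling back from
      * oom_score_adj to the legacy oom_adj interface if necessary.
      */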
2599 static void
2600 set_oom_adj (void)
2601 {
2602 #ifdef OOM_SCORE_ADJ_MIN
2603         int retry = 1;
2604         char *file = "/proc/self/oom_score_adj";
2605         int score = OOM_SCORE_ADJ_MIN;
2606 #else
2607         int retry = 0;
2608         char *file = "/proc/self/oom_adj";
2609         int score = OOM_ADJUST_MIN;
2610 #endif
2611         FILE *fp;
2612         struct stat st;
2613         char *envp;
2614
2615         envp = getenv("OOMScoreAdjust");
2616         if (envp) {
2617                 condlog(3, "Using systemd provided OOMScoreAdjust");
2618                 return;
2619         }
2620         do {
2621                 if (stat(file, &st) == 0){
2622                         fp = fopen(file, "w");
2623                         if (!fp) {
2624                                 condlog(0, "couldn't fopen %s : %s", file,
2625                                         strerror(errno));
2626                                 return;
2627                         }
2628                         fprintf(fp, "%i", score);
2629                         fclose(fp);
2630                         return;
2631                 }
2632                 if (errno != ENOENT) {
2633                         condlog(0, "couldn't stat %s : %s", file,
2634                                 strerror(errno));
2635                         return;
2636                 }
2637 #ifdef OOM_ADJUST_MIN
2638                 file = "/proc/self/oom_adj";
2639                 score = OOM_ADJUST_MIN;
2640 #else
2641                 retry = 0;
2642 #endif
2643         } while (retry--);
2644         condlog(0, "couldn't adjust oom score");
2645 }
2646
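     /*
      * Main daemon body: load the configuration, start the CLI
      * listener, uevent, dmevents and checker threads, handle
      * DAEMON_CONFIGURE requests until shutdown, then tear everything
      * down.
      */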
2647 static int
2648 child (void * param)
2649 {
2650         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2651         pthread_attr_t log_attr, misc_attr, uevent_attr;
2652         struct vectors * vecs;
2653         struct multipath * mpp;
2654         int i;
2655 #ifdef USE_SYSTEMD
2656         unsigned long checkint;
2657         int startup_done = 0;
2658 #endif
2659         int rc;
2660         int pid_fd = -1;
2661         struct config *conf;
2662         char *envp;
2663         int queue_without_daemon;
2664
2665         mlockall(MCL_CURRENT | MCL_FUTURE);
2666         signal_init();
2667         rcu_init();
2668
2669         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2670         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2671         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2672         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2673
2674         if (logsink == 1) {
2675                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2676                 log_thread_start(&log_attr);
2677                 pthread_attr_destroy(&log_attr);
2678         }
2679         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2680         if (pid_fd < 0) {
2681                 condlog(1, "failed to create pidfile");
2682                 if (logsink == 1)
2683                         log_thread_stop();
2684                 exit(1);
2685         }
2686
2687         post_config_state(DAEMON_START);
2688
2689         condlog(2, "--------start up--------");
2690         condlog(2, "read " DEFAULT_CONFIGFILE);
2691
2692         conf = load_config(DEFAULT_CONFIGFILE);
2693         if (!conf)
2694                 goto failed;
2695
2696         if (verbosity)
2697                 conf->verbosity = verbosity;
2698         if (bindings_read_only)
2699                 conf->bindings_read_only = bindings_read_only;
2700         uxsock_timeout = conf->uxsock_timeout;
2701         rcu_assign_pointer(multipath_conf, conf);
2702         if (init_checkers(conf->multipath_dir)) {
2703                 condlog(0, "failed to initialize checkers");
2704                 goto failed;
2705         }
2706         if (init_prio(conf->multipath_dir)) {
2707                 condlog(0, "failed to initialize prioritizers");
2708                 goto failed;
2709         }
2710         /* Failing this is non-fatal */
2711
2712         init_foreign(conf->multipath_dir);
2713
2714         if (poll_dmevents)
2715                 poll_dmevents = dmevent_poll_supported();
2716         setlogmask(LOG_UPTO(conf->verbosity + 3));
2717
2718         envp = getenv("LimitNOFILE");
2719
2720         if (envp)
2721                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2722         else
2723                 set_max_fds(conf->max_fds);
2724
2725         vecs = gvecs = init_vecs();
2726         if (!vecs)
2727                 goto failed;
2728
2729         setscheduler();
2730         set_oom_adj();
2731
2732 #ifdef USE_SYSTEMD
2733         envp = getenv("WATCHDOG_USEC");
2734         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2735                 /* Value is in microseconds */
2736                 conf->max_checkint = checkint / 1000000;
2737                 /* Rescale checkint */
2738                 if (conf->checkint > conf->max_checkint)
2739                         conf->checkint = conf->max_checkint;
2740                 else
2741                         conf->checkint = conf->max_checkint / 4;
2742                 condlog(3, "enabling watchdog, interval %d max %d",
2743                         conf->checkint, conf->max_checkint);
2744                 use_watchdog = conf->checkint;
2745         }
2746 #endif
2747         /*
2748          * Startup done, invalidate configuration
2749          */
2750         conf = NULL;
2751
2752         pthread_cleanup_push(config_cleanup, NULL);
2753         pthread_mutex_lock(&config_lock);
2754
2755         __post_config_state(DAEMON_IDLE);
2756         rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs);
2757         if (!rc) {
2758                 /* Wait for uxlsnr startup */
2759                 while (running_state == DAEMON_IDLE)
2760                         pthread_cond_wait(&config_cond, &config_lock);
2761         }
2762         pthread_cleanup_pop(1);
2763
2764         if (rc) {
2765                 condlog(0, "failed to create cli listener: %d", rc);
2766                 goto failed;
2767         }
2768         else if (running_state != DAEMON_CONFIGURE) {
2769                 condlog(0, "cli listener failed to start");
2770                 goto failed;
2771         }
2772
2773         if (poll_dmevents) {
2774                 if (init_dmevent_waiter(vecs)) {
2775                         condlog(0, "failed to allocate dmevents waiter info");
2776                         goto failed;
2777                 }
2778                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2779                                          wait_dmevents, NULL))) {
2780                         condlog(0, "failed to create dmevent waiter thread: %d",
2781                                 rc);
2782                         goto failed;
2783                 }
2784         }
2785
2786         /*
2787          * Start uevent listener early to catch events
2788          */
2789         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2790                 condlog(0, "failed to create uevent thread: %d", rc);
2791                 goto failed;
2792         }
2793         pthread_attr_destroy(&uevent_attr);
2794
2795         /*
2796          * start threads
2797          */
2798         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2799                 condlog(0,"failed to create checker loop thread: %d", rc);
2800                 goto failed;
2801         }
2802         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2803                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2804                 goto failed;
2805         }
2806         pthread_attr_destroy(&misc_attr);
2807
2808         while (running_state != DAEMON_SHUTDOWN) {
2809                 pthread_cleanup_push(config_cleanup, NULL);
2810                 pthread_mutex_lock(&config_lock);
2811                 if (running_state != DAEMON_CONFIGURE &&
2812                     running_state != DAEMON_SHUTDOWN) {
2813                         pthread_cond_wait(&config_cond, &config_lock);
2814                 }
2815                 pthread_cleanup_pop(1);
2816                 if (running_state == DAEMON_CONFIGURE) {
2817                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2818                         lock(&vecs->lock);
2819                         pthread_testcancel();
2820                         if (!need_to_delay_reconfig(vecs)) {
2821                                 reconfigure(vecs);
2822                         } else {
2823                                 conf = get_multipath_config();
2824                                 conf->delayed_reconfig = 1;
2825                                 put_multipath_config(conf);
2826                         }
2827                         lock_cleanup_pop(vecs->lock);
2828                         post_config_state(DAEMON_IDLE);
2829 #ifdef USE_SYSTEMD
2830                         if (!startup_done) {
2831                                 sd_notify(0, "READY=1");
2832                                 startup_done = 1;
2833                         }
2834 #endif
2835                 }
2836         }
2837
2838         lock(&vecs->lock);
2839         conf = get_multipath_config();
2840         queue_without_daemon = conf->queue_without_daemon;
2841         put_multipath_config(conf);
2842         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2843                 vector_foreach_slot(vecs->mpvec, mpp, i)
2844                         dm_queue_if_no_path(mpp->alias, 0);
2845         remove_maps_and_stop_waiters(vecs);
2846         unlock(&vecs->lock);
2847
2848         pthread_cancel(check_thr);
2849         pthread_cancel(uevent_thr);
2850         pthread_cancel(uxlsnr_thr);
2851         pthread_cancel(uevq_thr);
2852         if (poll_dmevents)
2853                 pthread_cancel(dmevent_thr);
2854
2855         pthread_join(check_thr, NULL);
2856         pthread_join(uevent_thr, NULL);
2857         pthread_join(uxlsnr_thr, NULL);
2858         pthread_join(uevq_thr, NULL);
2859         if (poll_dmevents)
2860                 pthread_join(dmevent_thr, NULL);
2861
2862         stop_io_err_stat_thread();
2863
2864         lock(&vecs->lock);
2865         free_pathvec(vecs->pathvec, FREE_PATHS);
2866         vecs->pathvec = NULL;
2867         unlock(&vecs->lock);
2868
2869         pthread_mutex_destroy(&vecs->lock.mutex);
2870         FREE(vecs);
2871         vecs = NULL;
2872
2873         cleanup_foreign();
2874         cleanup_checkers();
2875         cleanup_prio();
2876         if (poll_dmevents)
2877                 cleanup_dmevent_waiter();
2878
2879         dm_lib_release();
2880         dm_lib_exit();
2881
2882         /* We're done here */
2883         condlog(3, "unlink pidfile");
2884         unlink(DEFAULT_PIDFILE);
2885
2886         condlog(2, "--------shut down-------");
2887
2888         if (logsink == 1)
2889                 log_thread_stop();
2890
2891         /*
2892          * Freeing config must be done after condlog() and dm_lib_exit(),
2893          * because logging functions like dlog() and dm_write_log()
2894          * reference the config.
2895          */
2896         conf = rcu_dereference(multipath_conf);
2897         rcu_assign_pointer(multipath_conf, NULL);
2898         call_rcu(&conf->rcu, rcu_free_config);
2899         udev_unref(udev);
2900         udev = NULL;
2901         pthread_attr_destroy(&waiter_attr);
2902         pthread_attr_destroy(&io_err_stat_attr);
2903 #ifdef _DEBUG_
2904         dbg_free_final(NULL);
2905 #endif
2906
2907 #ifdef USE_SYSTEMD
2908         sd_notify(0, "ERRNO=0");
2909 #endif
2910         exit(0);
2911
2912 failed:
2913 #ifdef USE_SYSTEMD
2914         sd_notify(0, "ERRNO=1");
2915 #endif
2916         if (pid_fd >= 0)
2917                 close(pid_fd);
2918         exit(1);
2919 }
2920
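     /*
      * Classic double-fork daemonization: detach from the controlling
      * terminal, chdir to / and redirect stdin/stdout/stderr to
      * /dev/null. Returns 0 in the daemon process, the first child's
      * pid in the original parent and -1 on error.
      */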
2921 static int
2922 daemonize(void)
2923 {
2924         int pid;
2925         int dev_null_fd;
2926
2927         if( (pid = fork()) < 0){
2928                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2929                 return -1;
2930         }
2931         else if (pid != 0)
2932                 return pid;
2933
2934         setsid();
2935
2936         if ( (pid = fork()) < 0)
2937                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2938         else if (pid != 0)
2939                 _exit(0);
2940
2941         if (chdir("/") < 0)
2942                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2943
2944         dev_null_fd = open("/dev/null", O_RDWR);
2945         if (dev_null_fd < 0){
2946                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2947                         strerror(errno));
2948                 _exit(0);
2949         }
2950
2951         close(STDIN_FILENO);
2952         if (dup(dev_null_fd) < 0) {
2953                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2954                         strerror(errno));
2955                 _exit(0);
2956         }
2957         close(STDOUT_FILENO);
2958         if (dup(dev_null_fd) < 0) {
2959                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2960                         strerror(errno));
2961                 _exit(0);
2962         }
2963         close(STDERR_FILENO);
2964         if (dup(dev_null_fd) < 0) {
2965                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2966                         strerror(errno));
2967                 _exit(0);
2968         }
2969         close(dev_null_fd);
2970         daemon_pid = getpid();
2971         return 0;
2972 }
2973
2974 int
2975 main (int argc, char *argv[])
2976 {
2977         extern char *optarg;
2978         extern int optind;
2979         int arg;
2980         int err;
2981         int foreground = 0;
2982         struct config *conf;
2983
2984         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2985                                    "Manipulated through RCU");
2986         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2987                 "Suppress complaints about unprotected running_state reads");
2988         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2989                 "Suppress complaints about this scalar variable");
2990
2991         logsink = 1;
2992
2993         if (getuid() != 0) {
2994                 fprintf(stderr, "need to be root\n");
2995                 exit(1);
2996         }
2997
2998         /* make sure we don't lock any path */
2999         if (chdir("/") < 0)
3000                 fprintf(stderr, "can't chdir to root directory : %s\n",
3001                         strerror(errno));
3002         umask(umask(077) | 022);
3003
3004         pthread_cond_init_mono(&config_cond);
3005
3006         udev = udev_new();
3007         libmp_udev_set_sync_support(0);
3008
3009         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
3010                 switch(arg) {
3011                 case 'd':
3012                         foreground = 1;
3013                         if (logsink > 0)
3014                                 logsink = 0;
3015                         //debug=1; /* ### comment me out ### */
3016                         break;
3017                 case 'v':
3018                         if (sizeof(optarg) > sizeof(char *) ||
3019                             !isdigit(optarg[0]))
3020                                 exit(1);
3021
3022                         verbosity = atoi(optarg);
3023                         break;
3024                 case 's':
3025                         logsink = -1;
3026                         break;
3027                 case 'k':
3028                         logsink = 0;
3029                         conf = load_config(DEFAULT_CONFIGFILE);
3030                         if (!conf)
3031                                 exit(1);
3032                         if (verbosity)
3033                                 conf->verbosity = verbosity;
3034                         uxsock_timeout = conf->uxsock_timeout;
3035                         err = uxclnt(optarg, uxsock_timeout + 100);
3036                         free_config(conf);
3037                         return err;
3038                 case 'B':
3039                         bindings_read_only = 1;
3040                         break;
3041                 case 'n':
3042                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3043                         break;
3044                 case 'w':
3045                         poll_dmevents = 0;
3046                         break;
3047                 default:
3048                         fprintf(stderr, "Invalid argument '-%c'\n",
3049                                 optopt);
3050                         exit(1);
3051                 }
3052         }
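             /*
              * Any remaining non-option arguments are joined into a single
              * command string (quoting arguments that contain spaces) and
              * sent to the running daemon via uxclnt(); exit with its result.
              */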
3053         if (optind < argc) {
3054                 char cmd[CMDSIZE];
3055                 char * s = cmd;
3056                 char * c = s;
3057
3058                 logsink = 0;
3059                 conf = load_config(DEFAULT_CONFIGFILE);
3060                 if (!conf)
3061                         exit(1);
3062                 if (verbosity)
3063                         conf->verbosity = verbosity;
3064                 uxsock_timeout = conf->uxsock_timeout;
3065                 memset(cmd, 0x0, CMDSIZE);
3066                 while (optind < argc) {
3067                         if (strchr(argv[optind], ' '))
3068                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3069                         else
3070                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
                             /*
                              * snprintf() returns the length it would have written,
                              * so on truncation c could run past the end of cmd and
                              * the next size argument would underflow; clamp it.
                              */
                             if (c > s + CMDSIZE - 1)
                                     c = s + CMDSIZE - 1;
3071                         optind++;
3072                 }
3073                 c += snprintf(c, s + CMDSIZE - c, "\n");
3074                 err = uxclnt(s, uxsock_timeout + 100);
3075                 free_config(conf);
3076                 return err;
3077         }
3078
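             /*
              * daemonize() returns a negative value on error, the child's pid
              * in the original parent (which then exits), and 0 in the
              * detached child, which goes on to run child().
              */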
3079         if (foreground) {
3080                 if (!isatty(fileno(stdout)))
3081                         setbuf(stdout, NULL);
3082                 err = 0;
3083                 daemon_pid = getpid();
3084         } else
3085                 err = daemonize();
3086
3087         if (err < 0)
3088                 /* error */
3089                 exit(1);
3090         else if (err > 0)
3091                 /* parent dies */
3092                 exit(0);
3093         else
3094                 /* child lives */
3095                 return (child(NULL));
3096 }
3097
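/*
 * Per-path worker thread for persistent reservations: read the registered
 * keys from the device (MPATH_PRIN_RKEY_SA), check whether the map's
 * configured reservation_key is among them, and if so register that key on
 * this path with a REGISTER AND IGNORE EXISTING KEY service action
 * (MPATH_PROUT_REG_IGN_SA).
 */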
3098 void * mpath_pr_event_handler_fn (void * pathp)
3099 {
3100         struct multipath * mpp;
3101         int i, ret, isFound;
3102         struct path * pp = (struct path *)pathp;
3103         struct prout_param_descriptor *param;
3104         struct prin_resp *resp;
3105
3106         rcu_register_thread();
3107         mpp = pp->mpp;
3108
3109         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3110         if (!resp) {
3111                 condlog(0, "%s: failed to allocate prin response", pp->dev);
3112                 goto out;
3113         }
3114
3115         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3116         if (ret != MPATH_PR_SUCCESS) {
3117                 condlog(0, "%s: PR IN READ KEYS service action failed, error %d",
3118                         pp->dev, ret);
3119                 goto out;
3120         }
3121
3122         condlog(3, "event pr=%d addlen=%d",
3123                 resp->prin_descriptor.prin_readkeys.prgeneration,
                     resp->prin_descriptor.prin_readkeys.additional_length);
3124
3125         if (resp->prin_descriptor.prin_readkeys.additional_length == 0) {
3127                 condlog(1, "%s: no key found, device may not be registered", pp->dev);
3128                 ret = MPATH_PR_SUCCESS;
3129                 goto out;
3130         }
3131         condlog(2, "Multipath reservation_key: 0x%" PRIx64,
3132                 get_be64(mpp->reservation_key));
3133
3134         isFound = 0;
3135         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++) {
3137                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
3138                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8, -1);
3139                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8)) {
3141                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
3142                         isFound = 1;
3143                         break;
3144                 }
3145         }
3146         if (!isFound) {
3148                 condlog(0, "%s: device not registered or host not authorised for registration, skipping path",
3149                         pp->dev);
3150                 ret = MPATH_PR_OTHER;
3151                 goto out;
3152         }
3153
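             /*
              * Build the PR OUT parameter list: reuse the map's reservation
              * key and service action flags, with no transport IDs attached.
              */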
3154         param = malloc(sizeof(struct prout_param_descriptor));
             if (!param) {
                     condlog(0, "%s: failed to allocate prout parameter descriptor", pp->dev);
                     goto out;
             }
3155         memset(param, 0, sizeof(struct prout_param_descriptor));
3156         param->sa_flags = mpp->sa_flags;
3157         memcpy(param->sa_key, &mpp->reservation_key, 8);
3158         param->num_transportid = 0;
3159
3160         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3161
3162         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3163         if (ret != MPATH_PR_SUCCESS)
3164                 condlog(0, "%s: reservation registration failed, error %d",
                             pp->dev, ret);
3167         mpp->prflag = 1;
3168
3169         free(param);
3170 out:
3171         if (resp)
3172                 free(resp);
3173         rcu_unregister_thread();
3174         return NULL;
3175 }
3176
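/*
 * For a SCSI path on a map that has a persistent reservation_key configured,
 * run mpath_pr_event_handler_fn() in a separate thread and wait for it to
 * complete.
 */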
3177 int mpath_pr_event_handle(struct path *pp)
3178 {
3179         pthread_t thread;
3180         int rc;
3181         pthread_attr_t attr;
3182         struct multipath * mpp;
3183
3184         if (pp->bus != SYSFS_BUS_SCSI)
3185                 return 0;
3186
3187         mpp = pp->mpp;
3188
3189         if (!get_be64(mpp->reservation_key))
3190                 return -1;
3191
3192         pthread_attr_init(&attr);
3193         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3194
3195         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
             pthread_attr_destroy(&attr);
3196         if (rc) {
3197                 condlog(0, "%s: pthread_create() failed, error %d", pp->dev, rc);
3198                 return -1;
3199         }
3201         pthread_join(thread, NULL);
3202         return 0;
3203 }