multipathd: open client socket early
multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <limits.h>
16 #include <linux/oom.h>
17 #include <libudev.h>
18 #include <urcu.h>
19 #ifdef USE_SYSTEMD
20 #include <systemd/sd-daemon.h>
21 #endif
22 #include <semaphore.h>
23 #include <time.h>
24 #include <stdbool.h>
25
26 /*
27  * libmultipath
28  */
29 #include "time-util.h"
30
31 /*
32  * libcheckers
33  */
34 #include "checkers.h"
35
36 #ifdef USE_SYSTEMD
37 static int use_watchdog;
38 #endif
39
40 /*
41  * libmultipath
42  */
43 #include "parser.h"
44 #include "vector.h"
45 #include "memory.h"
46 #include "config.h"
47 #include "util.h"
48 #include "hwtable.h"
49 #include "defaults.h"
50 #include "structs.h"
51 #include "blacklist.h"
52 #include "structs_vec.h"
53 #include "dmparser.h"
54 #include "devmapper.h"
55 #include "sysfs.h"
56 #include "dict.h"
57 #include "discovery.h"
58 #include "debug.h"
59 #include "propsel.h"
60 #include "uevent.h"
61 #include "switchgroup.h"
62 #include "print.h"
63 #include "configure.h"
64 #include "prio.h"
65 #include "wwids.h"
66 #include "pgpolicies.h"
67 #include "uevent.h"
68 #include "log.h"
69 #include "uxsock.h"
70
71 #include "mpath_cmd.h"
72 #include "mpath_persist.h"
73
74 #include "prioritizers/alua_rtpg.h"
75
76 #include "main.h"
77 #include "pidfile.h"
78 #include "uxlsnr.h"
79 #include "uxclnt.h"
80 #include "cli.h"
81 #include "cli_handlers.h"
82 #include "lock.h"
83 #include "waiter.h"
84 #include "dmevents.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
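/*
 * Log the latest checker result for a path at verbosity 'lvl': offline
 * paths are reported as such; otherwise the checker name and its
 * message (if any) are printed.
 */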
93 #define LOG_MSG(lvl, verb, pp)                                  \
94 do {                                                            \
95         if (lvl <= verb) {                                      \
96                 if (pp->offline)                                \
97                         condlog(lvl, "%s: %s - path offline",   \
98                                 pp->mpp->alias, pp->dev);       \
99                 else  {                                         \
100                         const char *__m =                       \
101                                 checker_message(&pp->checker);  \
102                                                                 \
103                         if (strlen(__m))                              \
104                                 condlog(lvl, "%s: %s - %s checker%s", \
105                                         pp->mpp->alias,               \
106                                         pp->dev,                      \
107                                         checker_name(&pp->checker),   \
108                                         __m);                         \
109                 }                                                     \
110         }                                                             \
111 } while(0)
112
113 struct mpath_event_param
114 {
115         char * devname;
116         struct multipath *mpp;
117 };
118
119 int logsink;
120 int uxsock_timeout;
121 int verbosity;
122 int bindings_read_only;
123 int ignore_new_devs;
124 #ifdef NO_DMEVENTS_POLL
125 int poll_dmevents = 0;
126 #else
127 int poll_dmevents = 1;
128 #endif
129 enum daemon_status running_state = DAEMON_INIT;
130 pid_t daemon_pid;
131 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
132 pthread_cond_t config_cond;
133
134 /*
135  * global copy of vecs for use in sig handlers
136  */
137 struct vectors * gvecs;
138
139 struct udev * udev;
140
141 struct config *multipath_conf;
142
143 /* Local variables */
144 static volatile sig_atomic_t exit_sig;
145 static volatile sig_atomic_t reconfig_sig;
146 static volatile sig_atomic_t log_reset_sig;
147
148 const char *
149 daemon_status(void)
150 {
151         switch (running_state) {
152         case DAEMON_INIT:
153                 return "init";
154         case DAEMON_START:
155                 return "startup";
156         case DAEMON_CONFIGURE:
157                 return "configure";
158         case DAEMON_IDLE:
159                 return "idle";
160         case DAEMON_RUNNING:
161                 return "running";
162         case DAEMON_SHUTDOWN:
163                 return "shutdown";
164         }
165         return NULL;
166 }
167
168 /*
169  * I love you too, systemd ...
170  */
171 const char *
172 sd_notify_status(void)
173 {
174         switch (running_state) {
175         case DAEMON_INIT:
176                 return "STATUS=init";
177         case DAEMON_START:
178                 return "STATUS=startup";
179         case DAEMON_CONFIGURE:
180                 return "STATUS=configure";
181         case DAEMON_IDLE:
182         case DAEMON_RUNNING:
183                 return "STATUS=up";
184         case DAEMON_SHUTDOWN:
185                 return "STATUS=shutdown";
186         }
187         return NULL;
188 }
189
190 #ifdef USE_SYSTEMD
191 static void do_sd_notify(enum daemon_status old_state)
192 {
193         /*
194          * Checkerloop switches back and forth between idle and running state.
195          * No need to tell systemd each time.
196          * These notifications cause a lot of overhead on dbus.
197          */
198         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
199             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
200                 return;
201         sd_notify(0, sd_notify_status());
202 }
203 #endif
204
205 static void config_cleanup(void *arg)
206 {
207         pthread_mutex_unlock(&config_lock);
208 }
209
210 void post_config_state(enum daemon_status state)
211 {
212         pthread_mutex_lock(&config_lock);
213         if (state != running_state) {
214                 enum daemon_status old_state = running_state;
215
216                 running_state = state;
217                 pthread_cond_broadcast(&config_cond);
218 #ifdef USE_SYSTEMD
219                 do_sd_notify(old_state);
220 #endif
221         }
222         pthread_mutex_unlock(&config_lock);
223 }
224
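/*
 * Like post_config_state(), but if the daemon is not idle, wait up to
 * one second for a state change before switching.  Returns non-zero
 * (e.g. ETIMEDOUT) if the wait timed out and the state was left
 * unchanged.
 */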
225 int set_config_state(enum daemon_status state)
226 {
227         int rc = 0;
228
229         pthread_cleanup_push(config_cleanup, NULL);
230         pthread_mutex_lock(&config_lock);
231         if (running_state != state) {
232                 enum daemon_status old_state = running_state;
233
234                 if (running_state != DAEMON_IDLE) {
235                         struct timespec ts;
236
237                         clock_gettime(CLOCK_MONOTONIC, &ts);
238                         ts.tv_sec += 1;
239                         rc = pthread_cond_timedwait(&config_cond,
240                                                     &config_lock, &ts);
241                 }
242                 if (!rc) {
243                         running_state = state;
244                         pthread_cond_broadcast(&config_cond);
245 #ifdef USE_SYSTEMD
246                         do_sd_notify(old_state);
247 #endif
248                 }
249         }
250         pthread_cleanup_pop(1);
251         return rc;
252 }
253
254 struct config *get_multipath_config(void)
255 {
256         rcu_read_lock();
257         return rcu_dereference(multipath_conf);
258 }
259
260 void put_multipath_config(void *arg)
261 {
262         rcu_read_unlock();
263 }
264
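/*
 * Return 1 if the preferred path group (updating mpp->bestpg as a side
 * effect) differs from the group the kernel will use next and failback
 * is not manual.  With 'refresh' set, path priorities are re-read
 * first.
 */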
265 static int
266 need_switch_pathgroup (struct multipath * mpp, int refresh)
267 {
268         struct pathgroup * pgp;
269         struct path * pp;
270         unsigned int i, j;
271         struct config *conf;
272         int bestpg;
273
274         if (!mpp)
275                 return 0;
276
277         /*
278          * Refresh path priority values
279          */
280         if (refresh) {
281                 vector_foreach_slot (mpp->pg, pgp, i) {
282                         vector_foreach_slot (pgp->paths, pp, j) {
283                                 conf = get_multipath_config();
284                                 pthread_cleanup_push(put_multipath_config,
285                                                      conf);
286                                 pathinfo(pp, conf, DI_PRIO);
287                                 pthread_cleanup_pop(1);
288                         }
289                 }
290         }
291
292         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
293                 return 0;
294
295         bestpg = select_path_group(mpp);
296         if (mpp->pgfailback == -FAILBACK_MANUAL)
297                 return 0;
298
299         mpp->bestpg = bestpg;
300         if (mpp->bestpg != mpp->nextpg)
301                 return 1;
302
303         return 0;
304 }
305
306 static void
307 switch_pathgroup (struct multipath * mpp)
308 {
309         mpp->stat_switchgroup++;
310         dm_switchgroup(mpp->alias, mpp->bestpg);
311         condlog(2, "%s: switch to path group #%i",
312                  mpp->alias, mpp->bestpg);
313 }
314
315 static int
316 wait_for_events(struct multipath *mpp, struct vectors *vecs)
317 {
318         if (poll_dmevents)
319                 return watch_dmevents(mpp->alias);
320         else
321                 return start_waiter_thread(mpp, vecs);
322 }
323
324 static void
325 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
326 {
327         /* devices are automatically removed by the dmevent polling code,
328          * so they don't need to be manually removed here */
329         if (!poll_dmevents)
330                 stop_waiter_thread(mpp, vecs);
331         remove_map(mpp, vecs, PURGE_VEC);
332 }
333
334 static void
335 remove_maps_and_stop_waiters(struct vectors *vecs)
336 {
337         int i;
338         struct multipath * mpp;
339
340         if (!vecs)
341                 return;
342
343         if (!poll_dmevents) {
344                 vector_foreach_slot(vecs->mpvec, mpp, i)
345                         stop_waiter_thread(mpp, vecs);
346         }
347         else
348                 unwatch_all_dmevents();
349
350         remove_maps(vecs);
351 }
352
353 static void
354 set_multipath_wwid (struct multipath * mpp)
355 {
356         if (strlen(mpp->wwid))
357                 return;
358
359         dm_get_uuid(mpp->alias, mpp->wwid);
360 }
361
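/*
 * Bring the map's queue_if_no_path setting in line with its
 * no_path_retry policy.  With a numeric retry count, queueing stays
 * enabled while usable paths remain and recovery mode is entered once
 * all paths are gone.
 */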
362 static void set_no_path_retry(struct multipath *mpp)
363 {
364         char is_queueing = 0;
365
366         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
367         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
368                 is_queueing = 1;
369
370         switch (mpp->no_path_retry) {
371         case NO_PATH_RETRY_UNDEF:
372                 break;
373         case NO_PATH_RETRY_FAIL:
374                 if (is_queueing)
375                         dm_queue_if_no_path(mpp->alias, 0);
376                 break;
377         case NO_PATH_RETRY_QUEUE:
378                 if (!is_queueing)
379                         dm_queue_if_no_path(mpp->alias, 1);
380                 break;
381         default:
382                 if (mpp->nr_active > 0) {
383                         mpp->retry_tick = 0;
384                         dm_queue_if_no_path(mpp->alias, 1);
385                 } else if (is_queueing && mpp->retry_tick == 0)
386                         enter_recovery_mode(mpp);
387                 break;
388         }
389 }
390
391 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
392                       int reset)
393 {
394         if (dm_get_info(mpp->alias, &mpp->dmi)) {
395                 /* Error accessing table */
396                 condlog(3, "%s: cannot access table", mpp->alias);
397                 goto out;
398         }
399
400         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
401                 condlog(0, "%s: failed to setup multipath", mpp->alias);
402                 goto out;
403         }
404
405         if (reset) {
406                 set_no_path_retry(mpp);
407                 if (VECTOR_SIZE(mpp->paths) != 0)
408                         dm_cancel_deferred_remove(mpp);
409         }
410
411         return 0;
412 out:
413         remove_map_and_stop_waiter(mpp, vecs);
414         return 1;
415 }
416
417 int update_multipath (struct vectors *vecs, char *mapname, int reset)
418 {
419         struct multipath *mpp;
420         struct pathgroup  *pgp;
421         struct path *pp;
422         int i, j;
423
424         mpp = find_mp_by_alias(vecs->mpvec, mapname);
425
426         if (!mpp) {
427                 condlog(3, "%s: multipath map not found", mapname);
428                 return 2;
429         }
430
431         if (__setup_multipath(vecs, mpp, reset))
432                 return 1; /* mpp freed in __setup_multipath */
433
434         /*
435          * compare checkers states with DM states
436          */
437         vector_foreach_slot (mpp->pg, pgp, i) {
438                 vector_foreach_slot (pgp->paths, pp, j) {
439                         if (pp->dmstate != PSTATE_FAILED)
440                                 continue;
441
442                         if (pp->state != PATH_DOWN) {
443                                 struct config *conf;
444                                 int oldstate = pp->state;
445                                 int checkint;
446
447                                 conf = get_multipath_config();
448                                 checkint = conf->checkint;
449                                 put_multipath_config(conf);
450                                 condlog(2, "%s: mark as failed", pp->dev);
451                                 mpp->stat_path_failures++;
452                                 pp->state = PATH_DOWN;
453                                 if (oldstate == PATH_UP ||
454                                     oldstate == PATH_GHOST)
455                                         update_queue_mode_del_path(mpp);
456
457                                 /*
458                                  * if opportune,
459                                  * schedule the next check earlier
460                                  */
461                                 if (pp->tick > checkint)
462                                         pp->tick = checkint;
463                         }
464                 }
465         }
466         return 0;
467 }
468
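/*
 * Adopt the current paths into 'mpp' and reload its device-mapper
 * table, retrying the reload up to three times.  For a newly created
 * map, failure removes the map again and returns 1.
 */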
469 static int
470 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
471 {
472         int retries = 3;
473         char params[PARAMS_SIZE] = {0};
474
475 retry:
476         condlog(4, "%s: updating new map", mpp->alias);
477         if (adopt_paths(vecs->pathvec, mpp)) {
478                 condlog(0, "%s: failed to adopt paths for new map update",
479                         mpp->alias);
480                 retries = -1;
481                 goto fail;
482         }
483         verify_paths(mpp, vecs);
484         mpp->action = ACT_RELOAD;
485
486         extract_hwe_from_path(mpp);
487         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
488                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
489                 retries = -1;
490                 goto fail;
491         }
492         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
493                 condlog(0, "%s: map_update sleep", mpp->alias);
494                 sleep(1);
495                 goto retry;
496         }
497         dm_lib_release();
498
499 fail:
500         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
501                 condlog(0, "%s: failed to create new map", mpp->alias);
502                 remove_map(mpp, vecs, 1);
503                 return 1;
504         }
505
506         if (setup_multipath(vecs, mpp))
507                 return 1;
508
509         sync_map_state(mpp);
510
511         if (retries < 0)
512                 condlog(0, "%s: failed reload in new map update", mpp->alias);
513         return 0;
514 }
515
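/*
 * Create a struct multipath for a device-mapper map whose paths are
 * not yet known (e.g. a map first seen via uevent): read its table and
 * status from the kernel, add it to vecs->mpvec and adopt its paths.
 */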
516 static struct multipath *
517 add_map_without_path (struct vectors *vecs, const char *alias)
518 {
519         struct multipath * mpp = alloc_multipath();
520         struct config *conf;
521
522         if (!mpp)
523                 return NULL;
524         if (!alias) {
525                 FREE(mpp);
526                 return NULL;
527         }
528
529         mpp->alias = STRDUP(alias);
530
531         if (dm_get_info(mpp->alias, &mpp->dmi)) {
532                 condlog(3, "%s: cannot access table", mpp->alias);
533                 goto out;
534         }
535         set_multipath_wwid(mpp);
536         conf = get_multipath_config();
537         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
538         put_multipath_config(conf);
539
540         if (update_multipath_table(mpp, vecs->pathvec, 1))
541                 goto out;
542         if (update_multipath_status(mpp))
543                 goto out;
544
545         if (!vector_alloc_slot(vecs->mpvec))
546                 goto out;
547
548         vector_set_slot(vecs->mpvec, mpp);
549
550         if (update_map(mpp, vecs, 1) != 0) /* map removed */
551                 return NULL;
552
553         return mpp;
554 out:
555         remove_map(mpp, vecs, PURGE_VEC);
556         return NULL;
557 }
558
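/*
 * Flush maps that are not part of the newly generated map vector
 * 'nmpv'; maps that cannot be flushed (typically because they are
 * open) are kept and moved into 'nmpv' instead.  Maps that are still
 * wanted may be passed to dm_reassign() when reassign_maps is set.
 */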
559 static int
560 coalesce_maps(struct vectors *vecs, vector nmpv)
561 {
562         struct multipath * ompp;
563         vector ompv = vecs->mpvec;
564         unsigned int i, reassign_maps;
565         struct config *conf;
566
567         conf = get_multipath_config();
568         reassign_maps = conf->reassign_maps;
569         put_multipath_config(conf);
570         vector_foreach_slot (ompv, ompp, i) {
571                 condlog(3, "%s: coalesce map", ompp->alias);
572                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
573                         /*
574                          * remove all current maps not allowed by the
575                          * current configuration
576                          */
577                         if (dm_flush_map(ompp->alias)) {
578                                 condlog(0, "%s: unable to flush devmap",
579                                         ompp->alias);
580                                 /*
581                                  * may be just because the device is open
582                                  */
583                                 if (setup_multipath(vecs, ompp) != 0) {
584                                         i--;
585                                         continue;
586                                 }
587                                 if (!vector_alloc_slot(nmpv))
588                                         return 1;
589
590                                 vector_set_slot(nmpv, ompp);
591
592                                 vector_del_slot(ompv, i);
593                                 i--;
594                         }
595                         else {
596                                 dm_lib_release();
597                                 condlog(2, "%s devmap removed", ompp->alias);
598                         }
599                 } else if (reassign_maps) {
600                         condlog(3, "%s: Reassign existing device-mapper"
601                                 " devices", ompp->alias);
602                         dm_reassign(ompp->alias);
603                 }
604         }
605         return 0;
606 }
607
608 static void
609 sync_maps_state(vector mpvec)
610 {
611         unsigned int i;
612         struct multipath *mpp;
613
614         vector_foreach_slot (mpvec, mpp, i)
615                 sync_map_state(mpp);
616 }
617
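/*
 * Flush the map from the device mapper (deferring the removal if so
 * configured and the device is busy) and, on success, orphan its paths
 * and drop it from multipathd's own tables.
 */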
618 static int
619 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
620 {
621         int r;
622
623         if (nopaths)
624                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
625         else
626                 r = dm_flush_map(mpp->alias);
627         /*
628          * clear references to this map before flushing so we can ignore
629          * the spurious uevent we may generate with the dm_flush_map call below
630          */
631         if (r) {
632                 /*
633                  * May not really be an error -- if the map was already flushed
634                  * from the device mapper by dmsetup(8) for instance.
635                  */
636                 if (r == 1)
637                         condlog(0, "%s: can't flush", mpp->alias);
638                 else {
639                         condlog(2, "%s: devmap deferred remove", mpp->alias);
640                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
641                 }
642                 return r;
643         }
644         else {
645                 dm_lib_release();
646                 condlog(2, "%s: map flushed", mpp->alias);
647         }
648
649         orphan_paths(vecs->pathvec, mpp);
650         remove_map_and_stop_waiter(mpp, vecs);
651
652         return 0;
653 }
654
655 static int
656 uev_add_map (struct uevent * uev, struct vectors * vecs)
657 {
658         char *alias;
659         int major = -1, minor = -1, rc;
660
661         condlog(3, "%s: add map (uevent)", uev->kernel);
662         alias = uevent_get_dm_name(uev);
663         if (!alias) {
664                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
665                 major = uevent_get_major(uev);
666                 minor = uevent_get_minor(uev);
667                 alias = dm_mapname(major, minor);
668                 if (!alias) {
669                         condlog(2, "%s: mapname not found for %d:%d",
670                                 uev->kernel, major, minor);
671                         return 1;
672                 }
673         }
674         pthread_cleanup_push(cleanup_lock, &vecs->lock);
675         lock(&vecs->lock);
676         pthread_testcancel();
677         rc = ev_add_map(uev->kernel, alias, vecs);
678         lock_cleanup_pop(vecs->lock);
679         FREE(alias);
680         return rc;
681 }
682
683 /*
684  * ev_add_map expects that the multipath device already exists in kernel
685  * before it is called. It just adds a device to multipathd or updates an
686  * existing device.
687  */
688 int
689 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
690 {
691         struct multipath * mpp;
692         int delayed_reconfig, reassign_maps;
693         struct config *conf;
694
695         if (!dm_is_mpath(alias)) {
696                 condlog(4, "%s: not a multipath map", alias);
697                 return 0;
698         }
699
700         mpp = find_mp_by_alias(vecs->mpvec, alias);
701
702         if (mpp) {
703                 if (mpp->wait_for_udev > 1) {
704                         condlog(2, "%s: performing delayed actions",
705                                 mpp->alias);
706                         if (update_map(mpp, vecs, 0))
707                                 /* setup_multipath removed the map */
708                                 return 1;
709                 }
710                 conf = get_multipath_config();
711                 delayed_reconfig = conf->delayed_reconfig;
712                 reassign_maps = conf->reassign_maps;
713                 put_multipath_config(conf);
714                 if (mpp->wait_for_udev) {
715                         mpp->wait_for_udev = 0;
716                         if (delayed_reconfig &&
717                             !need_to_delay_reconfig(vecs)) {
718                                 condlog(2, "reconfigure (delayed)");
719                                 set_config_state(DAEMON_CONFIGURE);
720                                 return 0;
721                         }
722                 }
723                 /*
724                  * Not really an error -- we generate our own uevent
725                  * if we create a multipath mapped device as a result
726                  * of uev_add_path
727                  */
728                 if (reassign_maps) {
729                         condlog(3, "%s: Reassign existing device-mapper devices",
730                                 alias);
731                         dm_reassign(alias);
732                 }
733                 return 0;
734         }
735         condlog(2, "%s: adding map", alias);
736
737         /*
738          * now we can register the map
739          */
740         if ((mpp = add_map_without_path(vecs, alias))) {
741                 sync_map_state(mpp);
742                 condlog(2, "%s: devmap %s registered", alias, dev);
743                 return 0;
744         } else {
745                 condlog(2, "%s: ev_add_map failed", dev);
746                 return 1;
747         }
748 }
749
750 static int
751 uev_remove_map (struct uevent * uev, struct vectors * vecs)
752 {
753         char *alias;
754         int minor;
755         struct multipath *mpp;
756
757         condlog(3, "%s: remove map (uevent)", uev->kernel);
758         alias = uevent_get_dm_name(uev);
759         if (!alias) {
760                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
761                 return 0;
762         }
763         minor = uevent_get_minor(uev);
764
765         pthread_cleanup_push(cleanup_lock, &vecs->lock);
766         lock(&vecs->lock);
767         pthread_testcancel();
768         mpp = find_mp_by_minor(vecs->mpvec, minor);
769
770         if (!mpp) {
771                 condlog(2, "%s: devmap not registered, can't remove",
772                         uev->kernel);
773                 goto out;
774         }
775         if (strcmp(mpp->alias, alias)) {
776                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
777                         mpp->alias, mpp->dmi->minor, minor);
778                 goto out;
779         }
780
781         orphan_paths(vecs->pathvec, mpp);
782         remove_map_and_stop_waiter(mpp, vecs);
783 out:
784         lock_cleanup_pop(vecs->lock);
785         FREE(alias);
786         return 0;
787 }
788
789 /* Called from CLI handler */
790 int
791 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
792 {
793         struct multipath * mpp;
794
795         mpp = find_mp_by_minor(vecs->mpvec, minor);
796
797         if (!mpp) {
798                 condlog(2, "%s: devmap not registered, can't remove",
799                         devname);
800                 return 1;
801         }
802         if (strcmp(mpp->alias, alias)) {
803                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
804                         mpp->alias, mpp->dmi->minor, minor);
805                 return 1;
806         }
807         return flush_map(mpp, vecs, 0);
808 }
809
810 static int
811 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
812 {
813         struct path *pp;
814         int ret = 0, i;
815         struct config *conf;
816
817         condlog(3, "%s: add path (uevent)", uev->kernel);
818         if (strstr(uev->kernel, "..") != NULL) {
819                 /*
820                  * Don't allow relative device names in the pathvec
821                  */
822                 condlog(0, "%s: path name is invalid", uev->kernel);
823                 return 1;
824         }
825
826         pthread_cleanup_push(cleanup_lock, &vecs->lock);
827         lock(&vecs->lock);
828         pthread_testcancel();
829         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
830         if (pp) {
831                 int r;
832
833                 condlog(3, "%s: spurious uevent, path already in pathvec",
834                         uev->kernel);
835                 if (!pp->mpp && !strlen(pp->wwid)) {
836                         condlog(3, "%s: reinitialize path", uev->kernel);
837                         udev_device_unref(pp->udev);
838                         pp->udev = udev_device_ref(uev->udev);
839                         conf = get_multipath_config();
840                         pthread_cleanup_push(put_multipath_config, conf);
841                         r = pathinfo(pp, conf,
842                                      DI_ALL | DI_BLACKLIST);
843                         pthread_cleanup_pop(1);
844                         if (r == PATHINFO_OK)
845                                 ret = ev_add_path(pp, vecs, need_do_map);
846                         else if (r == PATHINFO_SKIPPED) {
847                                 condlog(3, "%s: remove blacklisted path",
848                                         uev->kernel);
849                                 i = find_slot(vecs->pathvec, (void *)pp);
850                                 if (i != -1)
851                                         vector_del_slot(vecs->pathvec, i);
852                                 free_path(pp);
853                         } else {
854                                 condlog(0, "%s: failed to reinitialize path",
855                                         uev->kernel);
856                                 ret = 1;
857                         }
858                 }
859         }
860         lock_cleanup_pop(vecs->lock);
861         if (pp)
862                 return ret;
863
864         /*
865          * get path vital state
866          */
867         conf = get_multipath_config();
868         pthread_cleanup_push(put_multipath_config, conf);
869         ret = alloc_path_with_pathinfo(conf, uev->udev,
870                                        uev->wwid, DI_ALL, &pp);
871         pthread_cleanup_pop(1);
872         if (!pp) {
873                 if (ret == PATHINFO_SKIPPED)
874                         return 0;
875                 condlog(3, "%s: failed to get path info", uev->kernel);
876                 return 1;
877         }
878         pthread_cleanup_push(cleanup_lock, &vecs->lock);
879         lock(&vecs->lock);
880         pthread_testcancel();
881         ret = store_path(vecs->pathvec, pp);
882         if (!ret) {
883                 conf = get_multipath_config();
884                 pp->checkint = conf->checkint;
885                 put_multipath_config(conf);
886                 ret = ev_add_path(pp, vecs, need_do_map);
887         } else {
888                 condlog(0, "%s: failed to store path info, "
889                         "dropping event",
890                         uev->kernel);
891                 free_path(pp);
892                 ret = 1;
893         }
894         lock_cleanup_pop(vecs->lock);
895         return ret;
896 }
897
898 /*
899  * returns:
900  * 0: added
901  * 1: error
902  */
903 int
904 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
905 {
906         struct multipath * mpp;
907         char params[PARAMS_SIZE] = {0};
908         int retries = 3;
909         int start_waiter = 0;
910         int ret;
911
912         /*
913          * need path UID to go any further
914          */
915         if (strlen(pp->wwid) == 0) {
916                 condlog(0, "%s: failed to get path uid", pp->dev);
917                 goto fail; /* leave path added to pathvec */
918         }
919         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
920         if (mpp && mpp->wait_for_udev &&
921             (pathcount(mpp, PATH_UP) > 0 ||
922              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
923               mpp->ghost_delay_tick <= 0))) {
924                 /* if wait_for_udev is set and valid paths exist */
925                 condlog(3, "%s: delaying path addition until %s is fully initialized",
926                         pp->dev, mpp->alias);
927                 mpp->wait_for_udev = 2;
928                 orphan_path(pp, "waiting for create to complete");
929                 return 0;
930         }
931
932         pp->mpp = mpp;
933 rescan:
934         if (mpp) {
935                 if (pp->size && mpp->size != pp->size) {
936                         condlog(0, "%s: failed to add new path %s, "
937                                 "device size mismatch",
938                                 mpp->alias, pp->dev);
939                         int i = find_slot(vecs->pathvec, (void *)pp);
940                         if (i != -1)
941                                 vector_del_slot(vecs->pathvec, i);
942                         free_path(pp);
943                         return 1;
944                 }
945
946                 condlog(4,"%s: adopting all paths for path %s",
947                         mpp->alias, pp->dev);
948                 if (adopt_paths(vecs->pathvec, mpp))
949                         goto fail; /* leave path added to pathvec */
950
951                 verify_paths(mpp, vecs);
952                 mpp->action = ACT_RELOAD;
953                 extract_hwe_from_path(mpp);
954         } else {
955                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
956                         orphan_path(pp, "only one path");
957                         return 0;
958                 }
959                 condlog(4,"%s: creating new map", pp->dev);
960                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
961                         mpp->action = ACT_CREATE;
962                         /*
963                          * We don't depend on ACT_CREATE, as domap will
964                          * set it to ACT_NOTHING when complete.
965                          */
966                         start_waiter = 1;
967                 }
968                 if (!start_waiter)
969                         goto fail; /* leave path added to pathvec */
970         }
971
972         /* persistent reservation check*/
973         mpath_pr_event_handle(pp);
974
975         if (!need_do_map)
976                 return 0;
977
978         if (!dm_map_present(mpp->alias)) {
979                 mpp->action = ACT_CREATE;
980                 start_waiter = 1;
981         }
982         /*
983          * push the map to the device-mapper
984          */
985         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
986                 condlog(0, "%s: failed to setup map for addition of new "
987                         "path %s", mpp->alias, pp->dev);
988                 goto fail_map;
989         }
990         /*
991          * reload the map for the multipath mapped device
992          */
993 retry:
994         ret = domap(mpp, params, 1);
995         if (ret <= 0) {
996                 if (ret < 0 && retries-- > 0) {
997                         condlog(0, "%s: retry domap for addition of new "
998                                 "path %s", mpp->alias, pp->dev);
999                         sleep(1);
1000                         goto retry;
1001                 }
1002                 condlog(0, "%s: failed in domap for addition of new "
1003                         "path %s", mpp->alias, pp->dev);
1004                 /*
1005                  * deal with asynchronous uevents :((
1006                  */
1007                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
1008                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
1009                         sleep(1);
1010                         update_mpp_paths(mpp, vecs->pathvec);
1011                         goto rescan;
1012                 }
1013                 else if (mpp->action == ACT_RELOAD)
1014                         condlog(0, "%s: giving up reload", mpp->alias);
1015                 else
1016                         goto fail_map;
1017         }
1018         dm_lib_release();
1019
1020         if ((mpp->action == ACT_CREATE ||
1021              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1022             wait_for_events(mpp, vecs))
1023                         goto fail_map;
1024
1025         /*
1026          * update our state from kernel regardless of create or reload
1027          */
1028         if (setup_multipath(vecs, mpp))
1029                 goto fail; /* if setup_multipath fails, it removes the map */
1030
1031         sync_map_state(mpp);
1032
1033         if (retries >= 0) {
1034                 condlog(2, "%s [%s]: path added to devmap %s",
1035                         pp->dev, pp->dev_t, mpp->alias);
1036                 return 0;
1037         } else
1038                 goto fail;
1039
1040 fail_map:
1041         remove_map(mpp, vecs, 1);
1042 fail:
1043         orphan_path(pp, "failed to add path");
1044         return 1;
1045 }
1046
1047 static int
1048 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1049 {
1050         struct path *pp;
1051         int ret;
1052
1053         condlog(3, "%s: remove path (uevent)", uev->kernel);
1054         delete_foreign(uev->udev);
1055
1056         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1057         lock(&vecs->lock);
1058         pthread_testcancel();
1059         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1060         if (pp)
1061                 ret = ev_remove_path(pp, vecs, need_do_map);
1062         lock_cleanup_pop(vecs->lock);
1063         if (!pp) {
1064                 /* Not an error; path might have been purged earlier */
1065                 condlog(0, "%s: path already removed", uev->kernel);
1066                 return 0;
1067         }
1068         return ret;
1069 }
1070
1071 int
1072 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1073 {
1074         struct multipath * mpp;
1075         int i, retval = 0;
1076         char params[PARAMS_SIZE] = {0};
1077
1078         /*
1079          * avoid referring to the map of an orphaned path
1080          */
1081         if ((mpp = pp->mpp)) {
1082                 /*
1083                  * transform the mp->pg vector of vectors of paths
1084                  * into a mp->params string to feed the device-mapper
1085                  */
1086                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1087                         condlog(0, "%s: failed to update paths",
1088                                 mpp->alias);
1089                         goto fail;
1090                 }
1091
1092                 /*
1093                  * Make sure mpp->hwe doesn't point to freed memory
1094                  * We call extract_hwe_from_path() below to restore mpp->hwe
1095                  */
1096                 if (mpp->hwe == pp->hwe)
1097                         mpp->hwe = NULL;
1098
1099                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1100                         vector_del_slot(mpp->paths, i);
1101
1102                 /*
1103                  * remove the map IF removing the last path
1104                  */
1105                 if (VECTOR_SIZE(mpp->paths) == 0) {
1106                         char alias[WWID_SIZE];
1107
1108                         /*
1109                          * flush_map will fail if the device is open
1110                          */
1111                         strlcpy(alias, mpp->alias, WWID_SIZE);
1112                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1113                                 condlog(2, "%s: last path deleted, disabling queueing", mpp->alias);
1114                                 mpp->retry_tick = 0;
1115                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1116                                 mpp->disable_queueing = 1;
1117                                 mpp->stat_map_failures++;
1118                                 dm_queue_if_no_path(mpp->alias, 0);
1119                         }
1120                         if (!flush_map(mpp, vecs, 1)) {
1121                                 condlog(2, "%s: removed map after"
1122                                         " removing all paths",
1123                                         alias);
1124                                 retval = 0;
1125                                 goto out;
1126                         }
1127                         /*
1128                          * Not an error, continue
1129                          */
1130                 }
1131
1132                 if (mpp->hwe == NULL)
1133                         extract_hwe_from_path(mpp);
1134
1135                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1136                         condlog(0, "%s: failed to setup map for"
1137                                 " removal of path %s", mpp->alias, pp->dev);
1138                         goto fail;
1139                 }
1140
1141                 if (mpp->wait_for_udev) {
1142                         mpp->wait_for_udev = 2;
1143                         goto out;
1144                 }
1145
1146                 if (!need_do_map)
1147                         goto out;
1148                 /*
1149                  * reload the map
1150                  */
1151                 mpp->action = ACT_RELOAD;
1152                 if (domap(mpp, params, 1) <= 0) {
1153                         condlog(0, "%s: failed in domap for "
1154                                 "removal of path %s",
1155                                 mpp->alias, pp->dev);
1156                         retval = 1;
1157                 } else {
1158                         /*
1159                          * update our state from kernel
1160                          */
1161                         if (setup_multipath(vecs, mpp))
1162                                 return 1;
1163                         sync_map_state(mpp);
1164
1165                         condlog(2, "%s [%s]: path removed from map %s",
1166                                 pp->dev, pp->dev_t, mpp->alias);
1167                 }
1168         }
1169
1170 out:
1171         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1172                 vector_del_slot(vecs->pathvec, i);
1173
1174         free_path(pp);
1175
1176         return retval;
1177
1178 fail:
1179         remove_map_and_stop_waiter(mpp, vecs);
1180         return 1;
1181 }
1182
1183 static int
1184 uev_update_path (struct uevent *uev, struct vectors * vecs)
1185 {
1186         int ro, retval = 0, rc;
1187         struct path * pp;
1188         struct config *conf;
1189         int disable_changed_wwids;
1190         int needs_reinit = 0;
1191
1192         switch ((rc = change_foreign(uev->udev))) {
1193         case FOREIGN_OK:
1194                 /* known foreign path, ignore event */
1195                 return 0;
1196         case FOREIGN_IGNORED:
1197                 break;
1198         case FOREIGN_ERR:
1199                 condlog(3, "%s: error in change_foreign", __func__);
1200                 break;
1201         default:
1202                 condlog(1, "%s: return code %d of change_foreign is unsupported",
1203                         __func__, rc);
1204                 break;
1205         }
1206
1207         conf = get_multipath_config();
1208         disable_changed_wwids = conf->disable_changed_wwids;
1209         put_multipath_config(conf);
1210
1211         ro = uevent_get_disk_ro(uev);
1212
1213         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1214         lock(&vecs->lock);
1215         pthread_testcancel();
1216
1217         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1218         if (pp) {
1219                 struct multipath *mpp = pp->mpp;
1220                 char wwid[WWID_SIZE];
1221
1222                 if (pp->initialized == INIT_REQUESTED_UDEV) {
1223                         needs_reinit = 1;
1224                         goto out;
1225                 }
1226                 /* Don't deal with other types of failed initialization
1227                  * now. check_path will handle it */
1228                 if (!strlen(pp->wwid))
1229                         goto out;
1230
1231                 strcpy(wwid, pp->wwid);
1232                 get_uid(pp, pp->state, uev->udev);
1233
1234                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1235                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1236                                 uev->kernel, wwid, pp->wwid,
1237                                 (disable_changed_wwids ? "disallowing" :
1238                                  "continuing"));
1239                         strcpy(pp->wwid, wwid);
1240                         if (disable_changed_wwids) {
1241                                 if (!pp->wwid_changed) {
1242                                         pp->wwid_changed = 1;
1243                                         pp->tick = 1;
1244                                         if (pp->mpp)
1245                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1246                                 }
1247                                 goto out;
1248                         }
1249                 } else {
1250                         pp->wwid_changed = 0;
1251                         udev_device_unref(pp->udev);
1252                         pp->udev = udev_device_ref(uev->udev);
1253                         conf = get_multipath_config();
1254                         pthread_cleanup_push(put_multipath_config, conf);
1255                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1256                                 condlog(1, "%s: pathinfo failed after change uevent",
1257                                         uev->kernel);
1258                         pthread_cleanup_pop(1);
1259                 }
1260
1261                 if (mpp && ro >= 0) {
1262                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1263
1264                         if (mpp->wait_for_udev)
1265                                 mpp->wait_for_udev = 2;
1266                         else {
1267                                 if (ro == 1)
1268                                         pp->mpp->force_readonly = 1;
1269                                 retval = reload_map(vecs, mpp, 0, 1);
1270                                 pp->mpp->force_readonly = 0;
1271                                 condlog(2, "%s: map %s reloaded (retval %d)",
1272                                         uev->kernel, mpp->alias, retval);
1273                         }
1274                 }
1275         }
1276 out:
1277         lock_cleanup_pop(vecs->lock);
1278         if (!pp) {
1279                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1280                 if (uev->udev) {
1281                         int flag = DI_SYSFS | DI_WWID;
1282
1283                         conf = get_multipath_config();
1284                         pthread_cleanup_push(put_multipath_config, conf);
1285                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1286                         pthread_cleanup_pop(1);
1287
1288                         if (retval == PATHINFO_SKIPPED) {
1289                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1290                                 return 0;
1291                         }
1292                 }
1293
1294                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1295         }
1296         if (needs_reinit)
1297                 retval = uev_add_path(uev, vecs, 1);
1298         return retval;
1299 }
1300
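/*
 * On a dm PATH_FAILED uevent, hand the failing path over to the
 * io_err_stat code so path IO error accounting can start.
 */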
1301 static int
1302 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1303 {
1304         char *action = NULL, *devt = NULL;
1305         struct path *pp;
1306         int r = 1;
1307
1308         action = uevent_get_dm_action(uev);
1309         if (!action)
1310                 return 1;
1311         if (strncmp(action, "PATH_FAILED", 11))
1312                 goto out;
1313         devt = uevent_get_dm_path(uev);
1314         if (!devt) {
1315                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1316                 goto out;
1317         }
1318
1319         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1320         lock(&vecs->lock);
1321         pthread_testcancel();
1322         pp = find_path_by_devt(vecs->pathvec, devt);
1323         if (!pp)
1324                 goto out_lock;
1325         r = io_err_stat_handle_pathfail(pp);
1326         if (r)
1327                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1328                                 pp->dev);
1329 out_lock:
1330         lock_cleanup_pop(vecs->lock);
1331         FREE(devt);
1332         FREE(action);
1333         return r;
1334 out:
1335         FREE(action);
1336         return 1;
1337 }
1338
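/*
 * Populate vecs->mpvec with the maps that already exist in the device
 * mapper, dropping any whose table or status cannot be read.
 */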
1339 static int
1340 map_discovery (struct vectors * vecs)
1341 {
1342         struct multipath * mpp;
1343         unsigned int i;
1344
1345         if (dm_get_maps(vecs->mpvec))
1346                 return 1;
1347
1348         vector_foreach_slot (vecs->mpvec, mpp, i)
1349                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1350                     update_multipath_status(mpp)) {
1351                         remove_map(mpp, vecs, 1);
1352                         i--;
1353                 }
1354
1355         return 0;
1356 }
1357
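/*
 * Callback for the client socket listener: non-root clients may only
 * issue "list"/"show" commands; everything else is passed to
 * parse_cmd(), and the reply (or an ok/fail/timeout string) is
 * returned to the client.
 */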
1358 int
1359 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1360                 void * trigger_data)
1361 {
1362         struct vectors * vecs;
1363         int r;
1364
1365         *reply = NULL;
1366         *len = 0;
1367         vecs = (struct vectors *)trigger_data;
1368
1369         if ((str != NULL) && (is_root == false) &&
1370             (strncmp(str, "list", strlen("list")) != 0) &&
1371             (strncmp(str, "show", strlen("show")) != 0)) {
1372                 *reply = STRDUP("permission denied: need to be root");
1373                 if (*reply)
1374                         *len = strlen(*reply) + 1;
1375                 return 1;
1376         }
1377
1378         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1379
1380         if (r > 0) {
1381                 if (r == ETIMEDOUT)
1382                         *reply = STRDUP("timeout\n");
1383                 else
1384                         *reply = STRDUP("fail\n");
1385                 if (*reply)
1386                         *len = strlen(*reply) + 1;
1387                 r = 1;
1388         }
1389         else if (!r && *len == 0) {
1390                 *reply = STRDUP("ok\n");
1391                 if (*reply)
1392                         *len = strlen(*reply) + 1;
1393                 r = 0;
1394         }
1395         /* else if (r < 0) leave *reply alone */
1396
1397         return r;
1398 }
1399
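/*
 * Callback for the uevent dispatcher: wait until the daemon is idle or
 * running (dropping events during shutdown), then route dm-* map
 * events and path add/remove/change events, including merged uevents,
 * to the handlers above.
 */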
1400 int
1401 uev_trigger (struct uevent * uev, void * trigger_data)
1402 {
1403         int r = 0;
1404         struct vectors * vecs;
1405         struct uevent *merge_uev, *tmp;
1406
1407         vecs = (struct vectors *)trigger_data;
1408
1409         pthread_cleanup_push(config_cleanup, NULL);
1410         pthread_mutex_lock(&config_lock);
1411         if (running_state != DAEMON_IDLE &&
1412             running_state != DAEMON_RUNNING)
1413                 pthread_cond_wait(&config_cond, &config_lock);
1414         pthread_cleanup_pop(1);
1415
1416         if (running_state == DAEMON_SHUTDOWN)
1417                 return 0;
1418
1419         /*
1420          * device map event
1421          * Add events are ignored here as the tables
1422          * are not fully initialised then.
1423          */
1424         if (!strncmp(uev->kernel, "dm-", 3)) {
1425                 if (!uevent_is_mpath(uev)) {
1426                         if (!strncmp(uev->action, "change", 6))
1427                                 (void)add_foreign(uev->udev);
1428                         else if (!strncmp(uev->action, "remove", 6))
1429                                 (void)delete_foreign(uev->udev);
1430                         goto out;
1431                 }
1432                 if (!strncmp(uev->action, "change", 6)) {
1433                         r = uev_add_map(uev, vecs);
1434
1435                         /*
1436                          * the kernel-side dm-mpath issues a PATH_FAILED event
1437                          * when it encounters a path IO error. This is a
1438                          * reasonable entry point for the path IO error
1439                          * accounting process.
1440                          */
1441                         uev_pathfail_check(uev, vecs);
1442                 } else if (!strncmp(uev->action, "remove", 6)) {
1443                         r = uev_remove_map(uev, vecs);
1444                 }
1445                 goto out;
1446         }
1447
1448         /*
1449          * path add/remove/change event, add/remove maybe merged
1450          */
1451         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1452                 if (!strncmp(merge_uev->action, "add", 3))
1453                         r += uev_add_path(merge_uev, vecs, 0);
1454                 if (!strncmp(merge_uev->action, "remove", 6))
1455                         r += uev_remove_path(merge_uev, vecs, 0);
1456         }
1457
1458         if (!strncmp(uev->action, "add", 3))
1459                 r += uev_add_path(uev, vecs, 1);
1460         if (!strncmp(uev->action, "remove", 6))
1461                 r += uev_remove_path(uev, vecs, 1);
1462         if (!strncmp(uev->action, "change", 6))
1463                 r += uev_update_path(uev, vecs);
1464
1465 out:
1466         return r;
1467 }
1468
1469 static void rcu_unregister(void *param)
1470 {
1471         rcu_unregister_thread();
1472 }
1473
1474 static void *
1475 ueventloop (void * ap)
1476 {
1477         struct udev *udev = ap;
1478
1479         pthread_cleanup_push(rcu_unregister, NULL);
1480         rcu_register_thread();
1481         if (uevent_listen(udev))
1482                 condlog(0, "error starting uevent listener");
1483         pthread_cleanup_pop(1);
1484         return NULL;
1485 }
1486
1487 static void *
1488 uevqloop (void * ap)
1489 {
1490         pthread_cleanup_push(rcu_unregister, NULL);
1491         rcu_register_thread();
1492         if (uevent_dispatch(&uev_trigger, ap))
1493                 condlog(0, "error starting uevent dispatcher");
1494         pthread_cleanup_pop(1);
1495         return NULL;
1496 }
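/*
 * CLI listener thread: create the client socket, register the CLI
 * command handlers, then serve requests via uxsock_listen().
 */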
1497 static void *
1498 uxlsnrloop (void * ap)
1499 {
1500         long ux_sock;
1501
1502         pthread_cleanup_push(rcu_unregister, NULL);
1503         rcu_register_thread();
1504
1505         ux_sock = ux_socket_listen(DEFAULT_SOCKET);
1506         if (ux_sock == -1) {
1507                 condlog(1, "could not create uxsock: %d", errno);
1508                 exit_daemon();
1509                 goto out;
1510         }
1511         pthread_cleanup_push(uxsock_cleanup, (void *)ux_sock);
1512
1513         if (cli_init()) {
1514                 condlog(1, "Failed to init uxsock listener");
1515                 exit_daemon();
1516                 goto out_sock;
1517         }
1518         set_handler_callback(LIST+PATHS, cli_list_paths);
1519         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1520         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1521         set_handler_callback(LIST+PATH, cli_list_path);
1522         set_handler_callback(LIST+MAPS, cli_list_maps);
1523         set_handler_callback(LIST+STATUS, cli_list_status);
1524         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1525         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1526         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1527         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1528         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1529         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1530         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1531         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1532         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1533         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1534         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1535         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1536         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1537         set_handler_callback(LIST+CONFIG, cli_list_config);
1538         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1539         set_handler_callback(LIST+DEVICES, cli_list_devices);
1540         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1541         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1542         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1543         set_handler_callback(ADD+PATH, cli_add_path);
1544         set_handler_callback(DEL+PATH, cli_del_path);
1545         set_handler_callback(ADD+MAP, cli_add_map);
1546         set_handler_callback(DEL+MAP, cli_del_map);
1547         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1548         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1549         set_handler_callback(SUSPEND+MAP, cli_suspend);
1550         set_handler_callback(RESUME+MAP, cli_resume);
1551         set_handler_callback(RESIZE+MAP, cli_resize);
1552         set_handler_callback(RELOAD+MAP, cli_reload);
1553         set_handler_callback(RESET+MAP, cli_reassign);
1554         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1555         set_handler_callback(FAIL+PATH, cli_fail);
1556         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1557         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1558         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1559         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1560         set_unlocked_handler_callback(QUIT, cli_quit);
1561         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1562         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1563         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1564         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1565         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1566         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1567         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1568         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1569         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1570
1571         umask(077);
1572         uxsock_listen(&uxsock_trigger, ux_sock, ap);
1573
1574 out_sock:
1575         pthread_cleanup_pop(1); /* uxsock_cleanup */
1576 out:
1577         pthread_cleanup_pop(1); /* rcu_unregister */
1578         return NULL;
1579 }
1580
1581 void
1582 exit_daemon (void)
1583 {
1584         post_config_state(DAEMON_SHUTDOWN);
1585 }
1586
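/*
 * Mark the path as failed in the device-mapper table and, if requested,
 * account for the loss of an active path in the map's queueing mode.
 */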
1587 static void
1588 fail_path (struct path * pp, int del_active)
1589 {
1590         if (!pp->mpp)
1591                 return;
1592
1593         condlog(2, "checker failed path %s in map %s",
1594                  pp->dev_t, pp->mpp->alias);
1595
1596         dm_fail_path(pp->mpp->alias, pp->dev_t);
1597         if (del_active)
1598                 update_queue_mode_del_path(pp->mpp);
1599 }
1600
1601 /*
1602  * caller must have locked the path list before calling this function
1603  */
1604 static int
1605 reinstate_path (struct path * pp, int add_active)
1606 {
1607         int ret = 0;
1608
1609         if (!pp->mpp)
1610                 return 0;
1611
1612         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1613                 condlog(0, "%s: reinstate failed", pp->dev_t);
1614                 ret = 1;
1615         } else {
1616                 condlog(2, "%s: reinstated", pp->dev_t);
1617                 if (add_active)
1618                         update_queue_mode_add_path(pp->mpp);
1619         }
1620         return ret;
1621 }
1622
1623 static void
1624 enable_group(struct path * pp)
1625 {
1626         struct pathgroup * pgp;
1627
1628         /*
1629          * if the path was added through uev_add_path, pgindex can be unset.
1630          * the next update_multipath_strings() call will set it, upon the map reload event.
1631          *
1632          * we can safely return here, because upon map reload, all
1633          * PGs will be enabled.
1634          */
1635         if (!pp->mpp->pg || !pp->pgindex)
1636                 return;
1637
1638         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1639
1640         if (pgp->status == PGSTATE_DISABLED) {
1641                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1642                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1643         }
1644 }
1645
1646 static void
1647 mpvec_garbage_collector (struct vectors * vecs)
1648 {
1649         struct multipath * mpp;
1650         unsigned int i;
1651
1652         if (!vecs->mpvec)
1653                 return;
1654
1655         vector_foreach_slot (vecs->mpvec, mpp, i) {
1656                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1657                         condlog(2, "%s: remove dead map", mpp->alias);
1658                         remove_map_and_stop_waiter(mpp, vecs);
1659                         i--;
1660                 }
1661         }
1662 }
1663
1664 /* This is called after a path has started working again. If the multipath
1665  * device for this path uses the followover failback type, and this is the
1666  * best pathgroup, and this is the first path in the pathgroup to come back
1667  * up, then switch to this pathgroup */
1668 static int
1669 followover_should_failback(struct path * pp)
1670 {
1671         struct pathgroup * pgp;
1672         struct path *pp1;
1673         int i;
1674
1675         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1676             !pp->mpp->pg || !pp->pgindex ||
1677             pp->pgindex != pp->mpp->bestpg)
1678                 return 0;
1679
1680         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1681         vector_foreach_slot(pgp->paths, pp1, i) {
1682                 if (pp1 == pp)
1683                         continue;
1684                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1685                         return 0;
1686         }
1687         return 1;
1688 }
1689
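/*
 * Count down the per-map timers that wait for the udev uevent following
 * map creation. When a timer expires, reloads are re-enabled for that
 * map; if a reconfigure had been delayed because of it, trigger it now.
 */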
1690 static void
1691 missing_uev_wait_tick(struct vectors *vecs)
1692 {
1693         struct multipath * mpp;
1694         unsigned int i;
1695         int timed_out = 0, delayed_reconfig;
1696         struct config *conf;
1697
1698         vector_foreach_slot (vecs->mpvec, mpp, i) {
1699                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1700                         timed_out = 1;
1701                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1702                         if (mpp->wait_for_udev > 1 &&
1703                             update_map(mpp, vecs, 0)) {
1704                                 /* update_map removed map */
1705                                 i--;
1706                                 continue;
1707                         }
1708                         mpp->wait_for_udev = 0;
1709                 }
1710         }
1711
1712         conf = get_multipath_config();
1713         delayed_reconfig = conf->delayed_reconfig;
1714         put_multipath_config(conf);
1715         if (timed_out && delayed_reconfig &&
1716             !need_to_delay_reconfig(vecs)) {
1717                 condlog(2, "reconfigure (delayed)");
1718                 set_config_state(DAEMON_CONFIGURE);
1719         }
1720 }
1721
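/*
 * Count down the ghost_delay timers. When one expires, stop waiting for
 * an active path and reload the map with a forced udev reload.
 */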
1722 static void
1723 ghost_delay_tick(struct vectors *vecs)
1724 {
1725         struct multipath * mpp;
1726         unsigned int i;
1727
1728         vector_foreach_slot (vecs->mpvec, mpp, i) {
1729                 if (mpp->ghost_delay_tick <= 0)
1730                         continue;
1731                 if (--mpp->ghost_delay_tick <= 0) {
1732                         condlog(0, "%s: timed out waiting for active path",
1733                                 mpp->alias);
1734                         mpp->force_udev_reload = 1;
1735                         if (update_map(mpp, vecs, 0) != 0) {
1736                                 /* update_map removed map */
1737                                 i--;
1738                                 continue;
1739                         }
1740                 }
1741         }
1742 }
1743
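/*
 * Count down the deferred failback timers; when one expires and a
 * better pathgroup is available, switch to it.
 */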
1744 static void
1745 defered_failback_tick (vector mpvec)
1746 {
1747         struct multipath * mpp;
1748         unsigned int i;
1749
1750         vector_foreach_slot (mpvec, mpp, i) {
1751                 /*
1752                  * the deferred failback timer is counting down
1753                  */
1754                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1755                         mpp->failback_tick--;
1756
1757                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1758                                 switch_pathgroup(mpp);
1759                 }
1760         }
1761 }
1762
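/*
 * Count down the retry timers of maps that are queueing without an
 * active path; when one expires, disable queueing on the map.
 */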
1763 static void
1764 retry_count_tick(vector mpvec)
1765 {
1766         struct multipath *mpp;
1767         unsigned int i;
1768
1769         vector_foreach_slot (mpvec, mpp, i) {
1770                 if (mpp->retry_tick > 0) {
1771                         mpp->stat_total_queueing_time++;
1772                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1773                         if (--mpp->retry_tick == 0) {
1774                                 mpp->stat_map_failures++;
1775                                 dm_queue_if_no_path(mpp->alias, 0);
1776                                 condlog(2, "%s: Disable queueing", mpp->alias);
1777                         }
1778                 }
1779         }
1780 }
1781
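/*
 * Refresh the priority of this path (or, if refresh_all is set, of all
 * paths in the map). Returns 1 if any priority changed, 0 otherwise.
 */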
1782 int update_prio(struct path *pp, int refresh_all)
1783 {
1784         int oldpriority;
1785         struct path *pp1;
1786         struct pathgroup * pgp;
1787         int i, j, changed = 0;
1788         struct config *conf;
1789
1790         if (refresh_all) {
1791                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1792                         vector_foreach_slot (pgp->paths, pp1, j) {
1793                                 oldpriority = pp1->priority;
1794                                 conf = get_multipath_config();
1795                                 pthread_cleanup_push(put_multipath_config,
1796                                                      conf);
1797                                 pathinfo(pp1, conf, DI_PRIO);
1798                                 pthread_cleanup_pop(1);
1799                                 if (pp1->priority != oldpriority)
1800                                         changed = 1;
1801                         }
1802                 }
1803                 return changed;
1804         }
1805         oldpriority = pp->priority;
1806         conf = get_multipath_config();
1807         pthread_cleanup_push(put_multipath_config, conf);
1808         if (pp->state != PATH_DOWN)
1809                 pathinfo(pp, conf, DI_PRIO);
1810         pthread_cleanup_pop(1);
1811
1812         if (pp->priority == oldpriority)
1813                 return 0;
1814         return 1;
1815 }
1816
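/*
 * Reload the map so that its pathgroup layout is regenerated, then
 * resync the daemon's view of the map with the kernel state.
 */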
1817 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1818 {
1819         if (reload_map(vecs, mpp, refresh, 1))
1820                 return 1;
1821
1822         dm_lib_release();
1823         if (setup_multipath(vecs, mpp) != 0)
1824                 return 1;
1825         sync_map_state(mpp);
1826
1827         return 0;
1828 }
1829
1830 /*
1831  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1832  * and '0' otherwise
1833  */
1834 int
1835 check_path (struct vectors * vecs, struct path * pp, int ticks)
1836 {
1837         int newstate;
1838         int new_path_up = 0;
1839         int chkr_new_path_up = 0;
1840         int add_active;
1841         int disable_reinstate = 0;
1842         int oldchkrstate = pp->chkrstate;
1843         int retrigger_tries, checkint, max_checkint, verbosity;
1844         struct config *conf;
1845         int ret;
1846
1847         if ((pp->initialized == INIT_OK ||
1848              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1849                 return 0;
1850
1851         if (pp->tick)
1852                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1853         if (pp->tick)
1854                 return 0; /* don't check this path yet */
1855
1856         conf = get_multipath_config();
1857         retrigger_tries = conf->retrigger_tries;
1858         checkint = conf->checkint;
1859         max_checkint = conf->max_checkint;
1860         verbosity = conf->verbosity;
1861         put_multipath_config(conf);
1862
1863         if (pp->checkint == CHECKINT_UNDEF) {
1864                 condlog(0, "%s: BUG: checkint is not set", pp->dev);
1865                 pp->checkint = checkint;
1866         };
1867         }
1868         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV) {
1869                 if (pp->retriggers < retrigger_tries) {
1870                         condlog(2, "%s: triggering change event to reinitialize",
1871                                 pp->dev);
1872                         pp->initialized = INIT_REQUESTED_UDEV;
1873                         pp->retriggers++;
1874                         sysfs_attr_set_value(pp->udev, "uevent", "change",
1875                                              strlen("change"));
1876                         return 0;
1877                 } else {
1878                         condlog(1, "%s: not initialized after %d udev retriggers",
1879                                 pp->dev, retrigger_tries);
1880                         /*
1881                          * Make sure that the "add missing path" code path
1882                          * below may reinstate the path later, if it ever
1883                          * comes up again.
1884                          * The WWID need not be cleared; if it was set, the
1885                          * state hadn't been INIT_MISSING_UDEV in the first
1886                          * place.
1887                          */
1888                         pp->initialized = INIT_FAILED;
1889                         return 0;
1890                 }
1891         }
1892
1893         /*
1894          * provision the next check interval now,
1895          * in case we exit abnormally from here
1896          */
1897         pp->tick = checkint;
1898
1899         newstate = path_offline(pp);
1900         /*
1901          * Wait for uevent for removed paths;
1902          * some LLDDs like zfcp keep paths unavailable
1903          * without sending uevents.
1904          */
1905         if (newstate == PATH_REMOVED)
1906                 newstate = PATH_DOWN;
1907
1908         if (newstate == PATH_UP) {
1909                 conf = get_multipath_config();
1910                 pthread_cleanup_push(put_multipath_config, conf);
1911                 newstate = get_state(pp, conf, 1, newstate);
1912                 pthread_cleanup_pop(1);
1913         } else
1914                 checker_clear_message(&pp->checker);
1915
1916         if (pp->wwid_changed) {
1917                 condlog(2, "%s: path wwid has changed. Refusing to use",
1918                         pp->dev);
1919                 newstate = PATH_DOWN;
1920         }
1921
1922         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1923                 condlog(2, "%s: unusable path - checker failed", pp->dev);
1924                 LOG_MSG(2, verbosity, pp);
1925                 conf = get_multipath_config();
1926                 pthread_cleanup_push(put_multipath_config, conf);
1927                 pathinfo(pp, conf, 0);
1928                 pthread_cleanup_pop(1);
1929                 return 1;
1930         }
1931         if (!pp->mpp) {
1932                 if (!strlen(pp->wwid) && pp->initialized == INIT_FAILED &&
1933                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1934                         condlog(2, "%s: add missing path", pp->dev);
1935                         conf = get_multipath_config();
1936                         pthread_cleanup_push(put_multipath_config, conf);
1937                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1938                         pthread_cleanup_pop(1);
1939                         /* INIT_OK implies ret == PATHINFO_OK */
1940                         if (pp->initialized == INIT_OK) {
1941                                 ev_add_path(pp, vecs, 1);
1942                                 pp->tick = 1;
1943                         } else {
1944                                 /*
1945                                  * We failed multiple times to initialize this
1946                                  * path properly. Don't re-check too often.
1947                                  */
1948                                 pp->checkint = max_checkint;
1949                                 if (ret == PATHINFO_SKIPPED)
1950                                         return -1;
1951                         }
1952                 }
1953                 return 0;
1954         }
1955         /*
1956          * Async IO in flight. Keep the previous path state
1957          * and reschedule as soon as possible
1958          */
1959         if (newstate == PATH_PENDING) {
1960                 pp->tick = 1;
1961                 return 0;
1962         }
1963         /*
1964          * Synchronize with kernel state
1965          */
1966         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1967                 condlog(1, "%s: Could not synchronize with kernel state",
1968                         pp->dev);
1969                 pp->dmstate = PSTATE_UNDEF;
1970         }
1971         /* if update_multipath_strings orphaned the path, quit early */
1972         if (!pp->mpp)
1973                 return 0;
1974
1975         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1976                 pp->state = PATH_SHAKY;
1977                 /*
1978                  * reschedule as soon as possible, so that this path can
1979                  * be recovered in time
1980                  */
1981                 pp->tick = 1;
1982                 return 1;
1983         }
1984
1985         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1986              pp->wait_checks > 0) {
1987                 if (pp->mpp->nr_active > 0) {
1988                         pp->state = PATH_DELAYED;
1989                         pp->wait_checks--;
1990                         return 1;
1991                 } else
1992                         pp->wait_checks = 0;
1993         }
1994
1995         /*
1996          * don't reinstate a failed path if it's in stand-by
1997          * and if target supports only implicit tpgs mode.
1998          * this will prevent unnecessary i/o by dm on stand-by
1999          * paths if there are no other active paths in map.
2000          */
2001         disable_reinstate = (newstate == PATH_GHOST &&
2002                             pp->mpp->nr_active == 0 &&
2003                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
2004
2005         pp->chkrstate = newstate;
2006         if (newstate != pp->state) {
2007                 int oldstate = pp->state;
2008                 pp->state = newstate;
2009
2010                 LOG_MSG(1, verbosity, pp);
2011
2012                 /*
2013                  * upon state change, reset the checkint
2014                  * to the shortest delay
2015                  */
2016                 conf = get_multipath_config();
2017                 pp->checkint = conf->checkint;
2018                 put_multipath_config(conf);
2019
2020                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
2021                         /*
2022                          * proactively fail path in the DM
2023                          */
2024                         if (oldstate == PATH_UP ||
2025                             oldstate == PATH_GHOST) {
2026                                 fail_path(pp, 1);
2027                                 if (pp->mpp->delay_wait_checks > 0 &&
2028                                     pp->watch_checks > 0) {
2029                                         pp->wait_checks = pp->mpp->delay_wait_checks;
2030                                         pp->watch_checks = 0;
2031                                 }
2032                                 } else
2033                                 fail_path(pp, 0);
2034
2035                         /*
2036                          * cancel scheduled failback
2037                          */
2038                         pp->mpp->failback_tick = 0;
2039
2040                         pp->mpp->stat_path_failures++;
2041                         return 1;
2042                 }
2043
2044                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
2045                         if (pp->mpp->prflag) {
2046                                 /*
2047                                  * Check Persistent Reservation.
2048                                  */
2049                                 condlog(2, "%s: checking persistent "
2050                                         "reservation registration", pp->dev);
2051                                 mpath_pr_event_handle(pp);
2052                         }
2053                 }
2054
2055                 /*
2056                  * reinstate this path
2057                  */
2058                 if (oldstate != PATH_UP &&
2059                     oldstate != PATH_GHOST) {
2060                         if (pp->mpp->delay_watch_checks > 0)
2061                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2062                         add_active = 1;
2063                 } else {
2064                         if (pp->watch_checks > 0)
2065                                 pp->watch_checks--;
2066                         add_active = 0;
2067                 }
2068                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2069                         condlog(3, "%s: reload map", pp->dev);
2070                         ev_add_path(pp, vecs, 1);
2071                         pp->tick = 1;
2072                         return 0;
2073                 }
2074                 new_path_up = 1;
2075
2076                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2077                         chkr_new_path_up = 1;
2078
2079                 /*
2080                  * if at least one path is up in a group, and
2081                  * the group is disabled, re-enable it
2082                  */
2083                 if (newstate == PATH_UP)
2084                         enable_group(pp);
2085         }
2086         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2087                 if ((pp->dmstate == PSTATE_FAILED ||
2088                     pp->dmstate == PSTATE_UNDEF) &&
2089                     !disable_reinstate) {
2090                         /* Clear IO errors */
2091                         if (reinstate_path(pp, 0)) {
2092                                 condlog(3, "%s: reload map", pp->dev);
2093                                 ev_add_path(pp, vecs, 1);
2094                                 pp->tick = 1;
2095                                 return 0;
2096                         }
2097                 } else {
2098                         LOG_MSG(4, verbosity, pp);
2099                         if (pp->checkint != max_checkint) {
2100                                 /*
2101                                  * double the next check delay.
2102                                  * max at conf->max_checkint
2103                                  */
2104                                 if (pp->checkint < (max_checkint / 2))
2105                                         pp->checkint = 2 * pp->checkint;
2106                                 else
2107                                         pp->checkint = max_checkint;
2108
2109                                 condlog(4, "%s: delay next check %is",
2110                                         pp->dev_t, pp->checkint);
2111                         }
2112                         if (pp->watch_checks > 0)
2113                                 pp->watch_checks--;
2114                         pp->tick = pp->checkint;
2115                 }
2116         }
2117         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2118                 if (pp->dmstate == PSTATE_ACTIVE ||
2119                     pp->dmstate == PSTATE_UNDEF)
2120                         fail_path(pp, 0);
2121                 if (newstate == PATH_DOWN) {
2122                         int log_checker_err;
2123
2124                         conf = get_multipath_config();
2125                         log_checker_err = conf->log_checker_err;
2126                         put_multipath_config(conf);
2127                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2128                                 LOG_MSG(3, verbosity, pp);
2129                         else
2130                                 LOG_MSG(2, verbosity, pp);
2131                 }
2132         }
2133
2134         pp->state = newstate;
2135
2136         if (pp->mpp->wait_for_udev)
2137                 return 1;
2138         /*
2139          * path prio refreshing
2140          */
2141         condlog(4, "path prio refresh");
2142
2143         if (update_prio(pp, new_path_up) &&
2144             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2145              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2146                 update_path_groups(pp->mpp, vecs, !new_path_up);
2147         else if (need_switch_pathgroup(pp->mpp, 0)) {
2148                 if (pp->mpp->pgfailback > 0 &&
2149                     (new_path_up || pp->mpp->failback_tick <= 0))
2150                         pp->mpp->failback_tick =
2151                                 pp->mpp->pgfailback + 1;
2152                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2153                          (chkr_new_path_up && followover_should_failback(pp)))
2154                         switch_pathgroup(pp->mpp);
2155         }
2156         return 1;
2157 }
2158
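/*
 * Main checker thread: roughly once per second, check every path, run
 * the per-map timers (deferred failback, retry, uevent wait, ghost
 * delay) and periodically garbage-collect maps whose devices are gone.
 */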
2159 static void *
2160 checkerloop (void *ap)
2161 {
2162         struct vectors *vecs;
2163         struct path *pp;
2164         int count = 0;
2165         unsigned int i;
2166         struct timespec last_time;
2167         struct config *conf;
2168
2169         pthread_cleanup_push(rcu_unregister, NULL);
2170         rcu_register_thread();
2171         mlockall(MCL_CURRENT | MCL_FUTURE);
2172         vecs = (struct vectors *)ap;
2173         condlog(2, "path checkers start up");
2174
2175         /* Tweak start time for initial path check */
2176         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2177                 last_time.tv_sec = 0;
2178         else
2179                 last_time.tv_sec -= 1;
2180
2181         while (1) {
2182                 struct timespec diff_time, start_time, end_time;
2183                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2184
2185                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2186                         start_time.tv_sec = 0;
2187                 if (start_time.tv_sec && last_time.tv_sec) {
2188                         timespecsub(&start_time, &last_time, &diff_time);
2189                         condlog(4, "tick (%lu.%06lu secs)",
2190                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2191                         last_time = start_time;
2192                         ticks = diff_time.tv_sec;
2193                 } else {
2194                         ticks = 1;
2195                         condlog(4, "tick (%d ticks)", ticks);
2196                 }
2197 #ifdef USE_SYSTEMD
2198                 if (use_watchdog)
2199                         sd_notify(0, "WATCHDOG=1");
2200 #endif
2201                 rc = set_config_state(DAEMON_RUNNING);
2202                 if (rc == ETIMEDOUT) {
2203                         condlog(4, "timeout waiting for DAEMON_IDLE");
2204                         continue;
2205                 }
2206
2207                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2208                 lock(&vecs->lock);
2209                 pthread_testcancel();
2210                 vector_foreach_slot (vecs->pathvec, pp, i) {
2211                         rc = check_path(vecs, pp, ticks);
2212                         if (rc < 0) {
2213                                 vector_del_slot(vecs->pathvec, i);
2214                                 free_path(pp);
2215                                 i--;
2216                         } else
2217                                 num_paths += rc;
2218                 }
2219                 lock_cleanup_pop(vecs->lock);
2220
2221                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2222                 lock(&vecs->lock);
2223                 pthread_testcancel();
2224                 defered_failback_tick(vecs->mpvec);
2225                 retry_count_tick(vecs->mpvec);
2226                 missing_uev_wait_tick(vecs);
2227                 ghost_delay_tick(vecs);
2228                 lock_cleanup_pop(vecs->lock);
2229
2230                 if (count)
2231                         count--;
2232                 else {
2233                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2234                         lock(&vecs->lock);
2235                         pthread_testcancel();
2236                         condlog(4, "map garbage collection");
2237                         mpvec_garbage_collector(vecs);
2238                         count = MAPGCINT;
2239                         lock_cleanup_pop(vecs->lock);
2240                 }
2241
2242                 diff_time.tv_nsec = 0;
2243                 if (start_time.tv_sec &&
2244                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2245                         timespecsub(&end_time, &start_time, &diff_time);
2246                         if (num_paths) {
2247                                 unsigned int max_checkint;
2248
2249                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2250                                         num_paths, num_paths > 1 ? "s" : "",
2251                                         diff_time.tv_sec,
2252                                         diff_time.tv_nsec / 1000);
2253                                 conf = get_multipath_config();
2254                                 max_checkint = conf->max_checkint;
2255                                 put_multipath_config(conf);
2256                                 if (diff_time.tv_sec > max_checkint)
2257                                         condlog(1, "path checkers took longer "
2258                                                 "than %lu seconds, consider "
2259                                                 "increasing max_polling_interval",
2260                                                 diff_time.tv_sec);
2261                         }
2262                 }
2263                 check_foreign();
2264                 post_config_state(DAEMON_IDLE);
2265                 conf = get_multipath_config();
2266                 strict_timing = conf->strict_timing;
2267                 put_multipath_config(conf);
2268                 if (!strict_timing)
2269                         sleep(1);
2270                 else {
2271                         if (diff_time.tv_nsec) {
2272                                 diff_time.tv_sec = 0;
2273                                 diff_time.tv_nsec =
2274                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2275                         } else
2276                                 diff_time.tv_sec = 1;
2277
2278                         condlog(3, "waiting for %lu.%06lu secs",
2279                                 diff_time.tv_sec,
2280                                 diff_time.tv_nsec / 1000);
2281                         if (nanosleep(&diff_time, NULL) != 0) {
2282                                 condlog(3, "nanosleep failed with error %d",
2283                                         errno);
2284                                 conf = get_multipath_config();
2285                                 conf->strict_timing = 0;
2286                                 put_multipath_config(conf);
2287                                 break;
2288                         }
2289                 }
2290         }
2291         pthread_cleanup_pop(1);
2292         return NULL;
2293 }
2294
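/*
 * (Re)build the daemon's path and map vectors: discover paths and maps,
 * apply the blacklist, coalesce paths into maps, push the resulting
 * tables into device-mapper, and start the event waiter threads.
 */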
2295 int
2296 configure (struct vectors * vecs)
2297 {
2298         struct multipath * mpp;
2299         struct path * pp;
2300         vector mpvec;
2301         int i, ret;
2302         struct config *conf;
2303         static int force_reload = FORCE_RELOAD_WEAK;
2304
2305         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2306                 condlog(0, "couldn't allocate path vec in configure");
2307                 return 1;
2308         }
2309
2310         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2311                 condlog(0, "couldn't allocate multipath vec in configure");
2312                 return 1;
2313         }
2314
2315         if (!(mpvec = vector_alloc())) {
2316                 condlog(0, "couldn't allocate new maps vec in configure");
2317                 return 1;
2318         }
2319
2320         /*
2321          * probe for current path (from sysfs) and map (from dm) sets
2322          */
2323         ret = path_discovery(vecs->pathvec, DI_ALL);
2324         if (ret < 0) {
2325                 condlog(0, "configure failed at path discovery");
2326                 goto fail;
2327         }
2328
2329         vector_foreach_slot (vecs->pathvec, pp, i){
2330                 conf = get_multipath_config();
2331                 pthread_cleanup_push(put_multipath_config, conf);
2332                 if (filter_path(conf, pp) > 0){
2333                         vector_del_slot(vecs->pathvec, i);
2334                         free_path(pp);
2335                         i--;
2336                 }
2337                 pthread_cleanup_pop(1);
2338         }
2339         if (map_discovery(vecs)) {
2340                 condlog(0, "configure failed at map discovery");
2341                 goto fail;
2342         }
2343
2344         /*
2345          * create new set of maps & push changed ones into dm
2346          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2347          * superfluous ACT_RELOAD ioctls. Later calls are done
2348          * with FORCE_RELOAD_YES.
2349          */
2350         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2351         if (force_reload == FORCE_RELOAD_WEAK)
2352                 force_reload = FORCE_RELOAD_YES;
2353         if (ret) {
2354                 condlog(0, "configure failed while coalescing paths");
2355                 goto fail;
2356         }
2357
2358         /*
2359          * may need to remove some maps which are no longer relevant
2360          * e.g., due to blacklist changes in conf file
2361          */
2362         if (coalesce_maps(vecs, mpvec)) {
2363                 condlog(0, "configure failed while coalescing maps");
2364                 goto fail;
2365         }
2366
2367         dm_lib_release();
2368
2369         sync_maps_state(mpvec);
2370         vector_foreach_slot(mpvec, mpp, i){
2371                 if (remember_wwid(mpp->wwid) == 1)
2372                         trigger_paths_udev_change(mpp, true);
2373                 update_map_pr(mpp);
2374         }
2375
2376         /*
2377          * purge dm of old maps
2378          */
2379         remove_maps(vecs);
2380
2381         /*
2382          * save new set of maps formed by considering current path state
2383          */
2384         vector_free(vecs->mpvec);
2385         vecs->mpvec = mpvec;
2386
2387         /*
2388          * start dm event waiter threads for these new maps
2389          */
2390         vector_foreach_slot(vecs->mpvec, mpp, i) {
2391                 if (wait_for_events(mpp, vecs)) {
2392                         remove_map(mpp, vecs, 1);
2393                         i--;
2394                         continue;
2395                 }
2396                 if (setup_multipath(vecs, mpp))
2397                         i--;
2398         }
2399         return 0;
2400
2401 fail:
2402         vector_free(mpvec);
2403         return 1;
2404 }
2405
2406 int
2407 need_to_delay_reconfig(struct vectors * vecs)
2408 {
2409         struct multipath *mpp;
2410         int i;
2411
2412         if (!VECTOR_SIZE(vecs->mpvec))
2413                 return 0;
2414
2415         vector_foreach_slot(vecs->mpvec, mpp, i) {
2416                 if (mpp->wait_for_udev)
2417                         return 1;
2418         }
2419         return 0;
2420 }
2421
2422 void rcu_free_config(struct rcu_head *head)
2423 {
2424         struct config *conf = container_of(head, struct config, rcu);
2425
2426         free_config(conf);
2427 }
2428
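/*
 * Re-read multipath.conf, drop the existing maps and paths, publish the
 * new configuration via RCU, and rebuild everything with configure().
 */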
2429 int
2430 reconfigure (struct vectors * vecs)
2431 {
2432         struct config * old, *conf;
2433
2434         conf = load_config(DEFAULT_CONFIGFILE);
2435         if (!conf)
2436                 return 1;
2437
2438         /*
2439          * free old map and path vectors ... they use old conf state
2440          */
2441         if (VECTOR_SIZE(vecs->mpvec))
2442                 remove_maps_and_stop_waiters(vecs);
2443
2444         free_pathvec(vecs->pathvec, FREE_PATHS);
2445         vecs->pathvec = NULL;
2446         delete_all_foreign();
2447
2448         /* Re-read any timezone changes */
2449         tzset();
2450
2451         dm_tgt_version(conf->version, TGT_MPATH);
2452         if (verbosity)
2453                 conf->verbosity = verbosity;
2454         if (bindings_read_only)
2455                 conf->bindings_read_only = bindings_read_only;
2456         uxsock_timeout = conf->uxsock_timeout;
2457
2458         old = rcu_dereference(multipath_conf);
2459         rcu_assign_pointer(multipath_conf, conf);
2460         call_rcu(&old->rcu, rcu_free_config);
2461
2462         configure(vecs);
2463
2464
2465         return 0;
2466 }
2467
2468 static struct vectors *
2469 init_vecs (void)
2470 {
2471         struct vectors * vecs;
2472
2473         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2474
2475         if (!vecs)
2476                 return NULL;
2477
2478         pthread_mutex_init(&vecs->lock.mutex, NULL);
2479
2480         return vecs;
2481 }
2482
2483 static void *
2484 signal_set(int signo, void (*func) (int))
2485 {
2486         int r;
2487         struct sigaction sig;
2488         struct sigaction osig;
2489
2490         sig.sa_handler = func;
2491         sigemptyset(&sig.sa_mask);
2492         sig.sa_flags = 0;
2493
2494         r = sigaction(signo, &sig, &osig);
2495
2496         if (r < 0)
2497                 return (SIG_ERR);
2498         else
2499                 return (osig.sa_handler);
2500 }
2501
2502 void
2503 handle_signals(bool nonfatal)
2504 {
2505         if (exit_sig) {
2506                 condlog(2, "exit (signal)");
2507                 exit_sig = 0;
2508                 exit_daemon();
2509         }
2510         if (!nonfatal)
2511                 return;
2512         if (reconfig_sig) {
2513                 condlog(2, "reconfigure (signal)");
2514                 set_config_state(DAEMON_CONFIGURE);
2515         }
2516         if (log_reset_sig) {
2517                 condlog(2, "reset log (signal)");
2518                 if (logsink == 1)
2519                         log_thread_reset();
2520         }
2521         reconfig_sig = 0;
2522         log_reset_sig = 0;
2523 }
2524
2525 static void
2526 sighup (int sig)
2527 {
2528         reconfig_sig = 1;
2529 }
2530
2531 static void
2532 sigend (int sig)
2533 {
2534         exit_sig = 1;
2535 }
2536
2537 static void
2538 sigusr1 (int sig)
2539 {
2540         log_reset_sig = 1;
2541 }
2542
2543 static void
2544 sigusr2 (int sig)
2545 {
2546         condlog(3, "SIGUSR2 received");
2547 }
2548
2549 static void
2550 signal_init(void)
2551 {
2552         sigset_t set;
2553
2554         /* block all signals */
2555         sigfillset(&set);
2556         /* SIGPIPE occurs if logging fails */
2557         sigdelset(&set, SIGPIPE);
2558         pthread_sigmask(SIG_SETMASK, &set, NULL);
2559
2560         /* Other signals will be unblocked in the uxlsnr thread */
2561         signal_set(SIGHUP, sighup);
2562         signal_set(SIGUSR1, sigusr1);
2563         signal_set(SIGUSR2, sigusr2);
2564         signal_set(SIGINT, sigend);
2565         signal_set(SIGTERM, sigend);
2566         signal_set(SIGPIPE, sigend);
2567 }
2568
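/* Run the daemon with real-time round-robin (SCHED_RR) priority, if permitted. */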
2569 static void
2570 setscheduler (void)
2571 {
2572         int res;
2573         static struct sched_param sched_param = {
2574                 .sched_priority = 99
2575         };
2576
2577         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2578
2579         if (res == -1)
2580                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2581         return;
2582 }
2583
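/*
 * Shield multipathd from the OOM killer by writing the minimum score to
 * /proc/self/oom_score_adj (or the legacy /proc/self/oom_adj), unless
 * systemd already provided OOMScoreAdjust.
 */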
2584 static void
2585 set_oom_adj (void)
2586 {
2587 #ifdef OOM_SCORE_ADJ_MIN
2588         int retry = 1;
2589         char *file = "/proc/self/oom_score_adj";
2590         int score = OOM_SCORE_ADJ_MIN;
2591 #else
2592         int retry = 0;
2593         char *file = "/proc/self/oom_adj";
2594         int score = OOM_ADJUST_MIN;
2595 #endif
2596         FILE *fp;
2597         struct stat st;
2598         char *envp;
2599
2600         envp = getenv("OOMScoreAdjust");
2601         if (envp) {
2602                 condlog(3, "Using systemd provided OOMScoreAdjust");
2603                 return;
2604         }
2605         do {
2606                 if (stat(file, &st) == 0){
2607                         fp = fopen(file, "w");
2608                         if (!fp) {
2609                                 condlog(0, "couldn't fopen %s : %s", file,
2610                                         strerror(errno));
2611                                 return;
2612                         }
2613                         fprintf(fp, "%i", score);
2614                         fclose(fp);
2615                         return;
2616                 }
2617                 if (errno != ENOENT) {
2618                         condlog(0, "couldn't stat %s : %s", file,
2619                                 strerror(errno));
2620                         return;
2621                 }
2622 #ifdef OOM_ADJUST_MIN
2623                 file = "/proc/self/oom_adj";
2624                 score = OOM_ADJUST_MIN;
2625 #else
2626                 retry = 0;
2627 #endif
2628         } while (retry--);
2629         condlog(0, "couldn't adjust oom score");
2630 }
2631
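/*
 * Daemon main routine, run in the daemonized child: set up logging, the
 * pidfile, the configuration and the worker threads, then loop on the
 * daemon state machine until shutdown is requested.
 */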
2632 static int
2633 child (void * param)
2634 {
2635         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2636         pthread_attr_t log_attr, misc_attr, uevent_attr;
2637         struct vectors * vecs;
2638         struct multipath * mpp;
2639         int i;
2640 #ifdef USE_SYSTEMD
2641         unsigned long checkint;
2642         int startup_done = 0;
2643 #endif
2644         int rc;
2645         int pid_fd = -1;
2646         struct config *conf;
2647         char *envp;
2648         int queue_without_daemon;
2649
2650         mlockall(MCL_CURRENT | MCL_FUTURE);
2651         signal_init();
2652         rcu_init();
2653
2654         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2655         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2656         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2657         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2658
2659         if (logsink == 1) {
2660                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2661                 log_thread_start(&log_attr);
2662                 pthread_attr_destroy(&log_attr);
2663         }
2664         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2665         if (pid_fd < 0) {
2666                 condlog(1, "failed to create pidfile");
2667                 if (logsink == 1)
2668                         log_thread_stop();
2669                 exit(1);
2670         }
2671
2672         post_config_state(DAEMON_START);
2673
2674         condlog(2, "--------start up--------");
2675         condlog(2, "read " DEFAULT_CONFIGFILE);
2676
2677         conf = load_config(DEFAULT_CONFIGFILE);
2678         if (!conf)
2679                 goto failed;
2680
2681         if (verbosity)
2682                 conf->verbosity = verbosity;
2683         if (bindings_read_only)
2684                 conf->bindings_read_only = bindings_read_only;
2685         uxsock_timeout = conf->uxsock_timeout;
2686         rcu_assign_pointer(multipath_conf, conf);
2687         if (init_checkers(conf->multipath_dir)) {
2688                 condlog(0, "failed to initialize checkers");
2689                 goto failed;
2690         }
2691         if (init_prio(conf->multipath_dir)) {
2692                 condlog(0, "failed to initialize prioritizers");
2693                 goto failed;
2694         }
2695         /* Failing this is non-fatal */
2696
2697         init_foreign(conf->multipath_dir);
2698
2699         if (poll_dmevents)
2700                 poll_dmevents = dmevent_poll_supported();
2701         setlogmask(LOG_UPTO(conf->verbosity + 3));
2702
2703         envp = getenv("LimitNOFILE");
2704
2705         if (envp)
2706                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2707         else
2708                 set_max_fds(conf->max_fds);
2709
2710         vecs = gvecs = init_vecs();
2711         if (!vecs)
2712                 goto failed;
2713
2714         setscheduler();
2715         set_oom_adj();
2716
2717 #ifdef USE_SYSTEMD
2718         envp = getenv("WATCHDOG_USEC");
2719         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2720                 /* Value is in microseconds */
2721                 conf->max_checkint = checkint / 1000000;
2722                 /* Rescale checkint */
2723                 if (conf->checkint > conf->max_checkint)
2724                         conf->checkint = conf->max_checkint;
2725                 else
2726                         conf->checkint = conf->max_checkint / 4;
2727                 condlog(3, "enabling watchdog, interval %d max %d",
2728                         conf->checkint, conf->max_checkint);
2729                 use_watchdog = conf->checkint;
2730         }
2731 #endif
2732         /*
2733          * Startup done, invalidate configuration
2734          */
2735         conf = NULL;
2736
2737         /*
2738          * Signal start of configuration
2739          */
2740         post_config_state(DAEMON_CONFIGURE);
2741
2742
2743         if (poll_dmevents) {
2744                 if (init_dmevent_waiter(vecs)) {
2745                         condlog(0, "failed to allocate dmevents waiter info");
2746                         goto failed;
2747                 }
2748                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2749                                          wait_dmevents, NULL))) {
2750                         condlog(0, "failed to create dmevent waiter thread: %d",
2751                                 rc);
2752                         goto failed;
2753                 }
2754         }
2755
2756         /*
2757          * Start uevent listener early to catch events
2758          */
2759         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2760                 condlog(0, "failed to create uevent thread: %d", rc);
2761                 goto failed;
2762         }
2763         pthread_attr_destroy(&uevent_attr);
2764         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2765                 condlog(0, "failed to create cli listener: %d", rc);
2766                 goto failed;
2767         }
2768
2769         /*
2770          * start threads
2771          */
2772         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2773                 condlog(0,"failed to create checker loop thread: %d", rc);
2774                 goto failed;
2775         }
2776         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2777                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2778                 goto failed;
2779         }
2780         pthread_attr_destroy(&misc_attr);
2781
2782         while (running_state != DAEMON_SHUTDOWN) {
2783                 pthread_cleanup_push(config_cleanup, NULL);
2784                 pthread_mutex_lock(&config_lock);
2785                 if (running_state != DAEMON_CONFIGURE &&
2786                     running_state != DAEMON_SHUTDOWN) {
2787                         pthread_cond_wait(&config_cond, &config_lock);
2788                 }
2789                 pthread_cleanup_pop(1);
2790                 if (running_state == DAEMON_CONFIGURE) {
2791                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2792                         lock(&vecs->lock);
2793                         pthread_testcancel();
2794                         if (!need_to_delay_reconfig(vecs)) {
2795                                 reconfigure(vecs);
2796                         } else {
2797                                 conf = get_multipath_config();
2798                                 conf->delayed_reconfig = 1;
2799                                 put_multipath_config(conf);
2800                         }
2801                         lock_cleanup_pop(vecs->lock);
2802                         post_config_state(DAEMON_IDLE);
2803 #ifdef USE_SYSTEMD
2804                         if (!startup_done) {
2805                                 sd_notify(0, "READY=1");
2806                                 startup_done = 1;
2807                         }
2808 #endif
2809                 }
2810         }
2811
2812         lock(&vecs->lock);
2813         conf = get_multipath_config();
2814         queue_without_daemon = conf->queue_without_daemon;
2815         put_multipath_config(conf);
2816         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2817                 vector_foreach_slot(vecs->mpvec, mpp, i)
2818                         dm_queue_if_no_path(mpp->alias, 0);
2819         remove_maps_and_stop_waiters(vecs);
2820         unlock(&vecs->lock);
2821
2822         pthread_cancel(check_thr);
2823         pthread_cancel(uevent_thr);
2824         pthread_cancel(uxlsnr_thr);
2825         pthread_cancel(uevq_thr);
2826         if (poll_dmevents)
2827                 pthread_cancel(dmevent_thr);
2828
2829         pthread_join(check_thr, NULL);
2830         pthread_join(uevent_thr, NULL);
2831         pthread_join(uxlsnr_thr, NULL);
2832         pthread_join(uevq_thr, NULL);
2833         if (poll_dmevents)
2834                 pthread_join(dmevent_thr, NULL);
2835
2836         stop_io_err_stat_thread();
2837
2838         lock(&vecs->lock);
2839         free_pathvec(vecs->pathvec, FREE_PATHS);
2840         vecs->pathvec = NULL;
2841         unlock(&vecs->lock);
2842
2843         pthread_mutex_destroy(&vecs->lock.mutex);
2844         FREE(vecs);
2845         vecs = NULL;
2846
2847         cleanup_foreign();
2848         cleanup_checkers();
2849         cleanup_prio();
2850         if (poll_dmevents)
2851                 cleanup_dmevent_waiter();
2852
2853         dm_lib_release();
2854         dm_lib_exit();
2855
2856         /* We're done here */
2857         condlog(3, "unlink pidfile");
2858         unlink(DEFAULT_PIDFILE);
2859
2860         condlog(2, "--------shut down-------");
2861
2862         if (logsink == 1)
2863                 log_thread_stop();
2864
2865         /*
2866          * Freeing config must be done after condlog() and dm_lib_exit(),
2867          * because logging functions like dlog() and dm_write_log()
2868          * reference the config.
2869          */
2870         conf = rcu_dereference(multipath_conf);
2871         rcu_assign_pointer(multipath_conf, NULL);
2872         call_rcu(&conf->rcu, rcu_free_config);
2873         udev_unref(udev);
2874         udev = NULL;
2875         pthread_attr_destroy(&waiter_attr);
2876         pthread_attr_destroy(&io_err_stat_attr);
2877 #ifdef _DEBUG_
2878         dbg_free_final(NULL);
2879 #endif
2880
2881 #ifdef USE_SYSTEMD
2882         sd_notify(0, "ERRNO=0");
2883 #endif
2884         exit(0);
2885
2886 failed:
2887 #ifdef USE_SYSTEMD
2888         sd_notify(0, "ERRNO=1");
2889 #endif
2890         if (pid_fd >= 0)
2891                 close(pid_fd);
2892         exit(1);
2893 }
2894
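/*
 * Classic double fork: detach from the controlling terminal, chdir to
 * "/" and redirect stdin/stdout/stderr to /dev/null. Returns 0 in the
 * daemon, the first child's pid in the original parent, -1 on error.
 */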
2895 static int
2896 daemonize(void)
2897 {
2898         int pid;
2899         int dev_null_fd;
2900
2901         if( (pid = fork()) < 0){
2902                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2903                 return -1;
2904         }
2905         else if (pid != 0)
2906                 return pid;
2907
2908         setsid();
2909
2910         if ( (pid = fork()) < 0)
2911                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2912         else if (pid != 0)
2913                 _exit(0);
2914
2915         if (chdir("/") < 0)
2916                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2917
2918         dev_null_fd = open("/dev/null", O_RDWR);
2919         if (dev_null_fd < 0){
2920                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2921                         strerror(errno));
2922                 _exit(0);
2923         }
2924
2925         close(STDIN_FILENO);
2926         if (dup(dev_null_fd) < 0) {
2927                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2928                         strerror(errno));
2929                 _exit(0);
2930         }
2931         close(STDOUT_FILENO);
2932         if (dup(dev_null_fd) < 0) {
2933                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2934                         strerror(errno));
2935                 _exit(0);
2936         }
2937         close(STDERR_FILENO);
2938         if (dup(dev_null_fd) < 0) {
2939                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2940                         strerror(errno));
2941                 _exit(0);
2942         }
2943         close(dev_null_fd);
2944         daemon_pid = getpid();
2945         return 0;
2946 }
2947
2948 int
2949 main (int argc, char *argv[])
2950 {
2951         extern char *optarg;
2952         extern int optind;
2953         int arg;
2954         int err;
2955         int foreground = 0;
2956         struct config *conf;
2957
2958         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2959                                    "Manipulated through RCU");
2960         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2961                 "Suppress complaints about unprotected running_state reads");
2962         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2963                 "Suppress complaints about this scalar variable");
2964
2965         logsink = 1;
2966
2967         if (getuid() != 0) {
2968                 fprintf(stderr, "need to be root\n");
2969                 exit(1);
2970         }
2971
2972         /* make sure we don't lock any path */
2973         if (chdir("/") < 0)
2974                 fprintf(stderr, "can't chdir to root directory : %s\n",
2975                         strerror(errno));
2976         umask(umask(077) | 022);
2977
2978         pthread_cond_init_mono(&config_cond);
2979
2980         udev = udev_new();
2981         libmp_udev_set_sync_support(0);
2982
2983         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2984                 switch(arg) {
2985                 case 'd':
2986                         foreground = 1;
2987                         if (logsink > 0)
2988                                 logsink = 0;
2989                         //debug=1; /* ### comment me out ### */
2990                         break;
2991                 case 'v':
2992                         /* the verbosity argument must be numeric */
2993                         if (!isdigit(optarg[0]))
2994                                 exit(1);
2995
2996                         verbosity = atoi(optarg);
2997                         break;
2998                 case 's':
2999                         logsink = -1;
3000                         break;
3001                 case 'k':
3002                         logsink = 0;
3003                         conf = load_config(DEFAULT_CONFIGFILE);
3004                         if (!conf)
3005                                 exit(1);
3006                         if (verbosity)
3007                                 conf->verbosity = verbosity;
3008                         uxsock_timeout = conf->uxsock_timeout;
3009                         err = uxclnt(optarg, uxsock_timeout + 100);
3010                         free_config(conf);
3011                         return err;
3012                 case 'B':
3013                         bindings_read_only = 1;
3014                         break;
3015                 case 'n':
3016                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
3017                         break;
3018                 case 'w':
3019                         poll_dmevents = 0;
3020                         break;
3021                 default:
3022                         fprintf(stderr, "Invalid argument '-%c'\n",
3023                                 optopt);
3024                         exit(1);
3025                 }
3026         }
3027         if (optind < argc) {
3028                 char cmd[CMDSIZE];
3029                 char * s = cmd;
3030                 char * c = s;
3031
3032                 logsink = 0;
3033                 conf = load_config(DEFAULT_CONFIGFILE);
3034                 if (!conf)
3035                         exit(1);
3036                 if (verbosity)
3037                         conf->verbosity = verbosity;
3038                 uxsock_timeout = conf->uxsock_timeout;
3039                 memset(cmd, 0x0, CMDSIZE);
                while (optind < argc) {
                        size_t len = s + CMDSIZE - c;
                        int n;
                        if (strchr(argv[optind], ' '))
                                n = snprintf(c, len, "\"%s\" ", argv[optind]);
                        else
                                n = snprintf(c, len, "%s ", argv[optind]);
                        /* don't write past cmd[] if the command is truncated */
                        if (n < 0 || (size_t)n >= len) {
                                fprintf(stderr, "command too long, truncated\n");
                                break;
                        }
                        c += n;
                        optind++;
                }
                snprintf(c, s + CMDSIZE - c, "\n");
3048                 err = uxclnt(s, uxsock_timeout + 100);
3049                 free_config(conf);
3050                 return err;
3051         }
3052
3053         if (foreground) {
3054                 if (!isatty(fileno(stdout)))
3055                         setbuf(stdout, NULL);
3056                 err = 0;
3057                 daemon_pid = getpid();
3058         } else
3059                 err = daemonize();
3060
3061         if (err < 0)
3062                 /* error */
3063                 exit(1);
3064         else if (err > 0)
3065                 /* parent dies */
3066                 exit(0);
3067         else
3068                 /* child lives */
3069                 return (child(NULL));
3070 }
3071
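/*
 * Per-path persistent reservation worker: read the keys registered with the
 * device (PR IN, READ KEYS service action), check whether the map's
 * configured reservation_key is among them, and if so register that key on
 * this path as well (PR OUT, REGISTER AND IGNORE EXISTING KEY).
 */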
void *mpath_pr_event_handler_fn(void *pathp)
3073 {
3074         struct multipath * mpp;
3075         int i, ret, isFound;
3076         struct path * pp = (struct path *)pathp;
3077         struct prout_param_descriptor *param;
3078         struct prin_resp *resp;
3079
3080         rcu_register_thread();
3081         mpp = pp->mpp;
3082
3083         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
        if (!resp) {
                condlog(0, "%s: alloc failed for prin response", pp->dev);
                goto out;
        }
3088
3089         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
        if (ret != MPATH_PR_SUCCESS) {
                condlog(0, "%s: pr in read keys service action failed, error=%d",
                        pp->dev, ret);
                goto out;
        }
3095
        condlog(3, "event pr=%d addlen=%d",
                resp->prin_descriptor.prin_readkeys.prgeneration,
                resp->prin_descriptor.prin_readkeys.additional_length);

        if (resp->prin_descriptor.prin_readkeys.additional_length == 0) {
                condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
                ret = MPATH_PR_SUCCESS;
                goto out;
        }
        condlog(2, "Multipath reservation_key: 0x%" PRIx64,
                get_be64(mpp->reservation_key));
3107
        isFound = 0;
        /* each registered key is 8 bytes; additional_length is in bytes */
        for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++) {
                condlog(2, "PR IN READKEYS[%d] reservation key:", i);
                dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i * 8],
                        8, -1);
                if (!memcmp(&mpp->reservation_key,
                            &resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8)) {
                        condlog(2, "%s: pr key found in prin readkeys response",
                                mpp->alias);
                        isFound = 1;
                        break;
                }
        }
        if (!isFound) {
                condlog(0, "%s: device is not registered or host is not authorised "
                        "for registration, skipping path", pp->dev);
                ret = MPATH_PR_OTHER;
                goto out;
        }
3127
        /* zero-filled PR OUT parameter descriptor carrying the map's key */
        param = calloc(1, sizeof(struct prout_param_descriptor));
        if (!param) {
                condlog(0, "%s: alloc failed for prout parameters", pp->dev);
                goto out;
        }
        param->sa_flags = mpp->sa_flags;
        memcpy(param->sa_key, &mpp->reservation_key, 8);
        param->num_transportid = 0;
3133
3134         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3135
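        /* register the map's key on this path, ignoring any existing registration */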
3136         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
        if (ret != MPATH_PR_SUCCESS)
                condlog(0, "%s: Reservation registration failed. Error: %d",
                        pp->dev, ret);
3141         mpp->prflag = 1;
3142
3143         free(param);
3144 out:
3145         if (resp)
3146                 free(resp);
3147         rcu_unregister_thread();
3148         return NULL;
3149 }
3150
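/*
 * Spawn mpath_pr_event_handler_fn for a SCSI path whose map has a
 * reservation_key configured, and wait for it to finish.
 */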
3151 int mpath_pr_event_handle(struct path *pp)
3152 {
3153         pthread_t thread;
3154         int rc;
3155         pthread_attr_t attr;
3156         struct multipath * mpp;
3157
3158         if (pp->bus != SYSFS_BUS_SCSI)
3159                 return 0;
3160
3161         mpp = pp->mpp;
3162
3163         if (!get_be64(mpp->reservation_key))
3164                 return -1;
3165
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

        /* create a joinable worker thread and wait for it below */
        rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
        pthread_attr_destroy(&attr);
        if (rc) {
                condlog(0, "%s: ERROR; return code from pthread_create() is %d",
                        pp->dev, rc);
                return -1;
        }
        pthread_join(thread, NULL);
        return 0;
3177 }