multipathd: fix memory leak on error in configure
multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <urcu.h>
21 #ifdef USE_SYSTEMD
22 #include <systemd/sd-daemon.h>
23 #endif
24 #include <semaphore.h>
25 #include <time.h>
26 #include <stdbool.h>
27
28 /*
29  * libmultipath
30  */
31 #include "time-util.h"
32
33 /*
34  * libcheckers
35  */
36 #include "checkers.h"
37
38 #ifdef USE_SYSTEMD
39 static int use_watchdog;
40 #endif
41
42 /*
43  * libmultipath
44  */
45 #include "parser.h"
46 #include "vector.h"
47 #include "memory.h"
48 #include "config.h"
49 #include "util.h"
50 #include "hwtable.h"
51 #include "defaults.h"
52 #include "structs.h"
53 #include "blacklist.h"
54 #include "structs_vec.h"
55 #include "dmparser.h"
56 #include "devmapper.h"
57 #include "sysfs.h"
58 #include "dict.h"
59 #include "discovery.h"
60 #include "debug.h"
61 #include "propsel.h"
62 #include "uevent.h"
63 #include "switchgroup.h"
64 #include "print.h"
65 #include "configure.h"
66 #include "prio.h"
67 #include "wwids.h"
68 #include "pgpolicies.h"
69 #include "uevent.h"
70 #include "log.h"
71
72 #include "mpath_cmd.h"
73 #include "mpath_persist.h"
74
75 #include "prioritizers/alua_rtpg.h"
76
77 #include "main.h"
78 #include "pidfile.h"
79 #include "uxlsnr.h"
80 #include "uxclnt.h"
81 #include "cli.h"
82 #include "cli_handlers.h"
83 #include "lock.h"
84 #include "waiter.h"
85 #include "dmevents.h"
86 #include "io_err_stat.h"
87 #include "wwids.h"
88 #include "foreign.h"
89 #include "../third-party/valgrind/drd.h"
90
91 #define FILE_NAME_SIZE 256
92 #define CMDSIZE 160
93
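/*
 * Convenience logging for checker results: expects a local
 * "struct path *pp" in scope and logs at verbosity "a"; offline paths
 * are reported explicitly, otherwise the checker message "b" is only
 * logged when it is non-empty.
 */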
94 #define LOG_MSG(a, b) \
95 do { \
96         if (pp->offline) \
97                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
98         else if (strlen(b)) \
99                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
100 } while(0)
101
102 struct mpath_event_param
103 {
104         char * devname;
105         struct multipath *mpp;
106 };
107
108 int logsink;
109 int uxsock_timeout;
110 int verbosity;
111 int bindings_read_only;
112 int ignore_new_devs;
113 #ifdef NO_DMEVENTS_POLL
114 int poll_dmevents = 0;
115 #else
116 int poll_dmevents = 1;
117 #endif
118 enum daemon_status running_state = DAEMON_INIT;
119 pid_t daemon_pid;
120 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
121 pthread_cond_t config_cond;
122
123 /*
124  * global copy of vecs for use in sig handlers
125  */
126 struct vectors * gvecs;
127
128 struct udev * udev;
129
130 struct config *multipath_conf;
131
132 /* Local variables */
133 static volatile sig_atomic_t exit_sig;
134 static volatile sig_atomic_t reconfig_sig;
135 static volatile sig_atomic_t log_reset_sig;
136
137 const char *
138 daemon_status(void)
139 {
140         switch (running_state) {
141         case DAEMON_INIT:
142                 return "init";
143         case DAEMON_START:
144                 return "startup";
145         case DAEMON_CONFIGURE:
146                 return "configure";
147         case DAEMON_IDLE:
148                 return "idle";
149         case DAEMON_RUNNING:
150                 return "running";
151         case DAEMON_SHUTDOWN:
152                 return "shutdown";
153         }
154         return NULL;
155 }
156
157 /*
158  * I love you too, systemd ...
159  */
160 const char *
161 sd_notify_status(void)
162 {
163         switch (running_state) {
164         case DAEMON_INIT:
165                 return "STATUS=init";
166         case DAEMON_START:
167                 return "STATUS=startup";
168         case DAEMON_CONFIGURE:
169                 return "STATUS=configure";
170         case DAEMON_IDLE:
171         case DAEMON_RUNNING:
172                 return "STATUS=up";
173         case DAEMON_SHUTDOWN:
174                 return "STATUS=shutdown";
175         }
176         return NULL;
177 }
178
179 #ifdef USE_SYSTEMD
180 static void do_sd_notify(enum daemon_status old_state)
181 {
182         /*
183          * Checkerloop switches back and forth between idle and running state.
184          * No need to tell systemd each time.
185          * These notifications cause a lot of overhead on dbus.
186          */
187         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
188             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
189                 return;
190         sd_notify(0, sd_notify_status());
191 }
192 #endif
193
194 static void config_cleanup(void *arg)
195 {
196         pthread_mutex_unlock(&config_lock);
197 }
198
199 void post_config_state(enum daemon_status state)
200 {
201         pthread_mutex_lock(&config_lock);
202         if (state != running_state) {
203                 enum daemon_status old_state = running_state;
204
205                 running_state = state;
206                 pthread_cond_broadcast(&config_cond);
207 #ifdef USE_SYSTEMD
208                 do_sd_notify(old_state);
209 #endif
210         }
211         pthread_mutex_unlock(&config_lock);
212 }
213
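/*
 * Like post_config_state(), but if the daemon is busy (running_state is
 * not DAEMON_IDLE), wait up to one second for a state change before
 * switching; otherwise the pthread_cond_timedwait() error (e.g.
 * ETIMEDOUT) is returned and the state is left unchanged.
 */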
214 int set_config_state(enum daemon_status state)
215 {
216         int rc = 0;
217
218         pthread_cleanup_push(config_cleanup, NULL);
219         pthread_mutex_lock(&config_lock);
220         if (running_state != state) {
221                 enum daemon_status old_state = running_state;
222
223                 if (running_state != DAEMON_IDLE) {
224                         struct timespec ts;
225
226                         clock_gettime(CLOCK_MONOTONIC, &ts);
227                         ts.tv_sec += 1;
228                         rc = pthread_cond_timedwait(&config_cond,
229                                                     &config_lock, &ts);
230                 }
231                 if (!rc) {
232                         running_state = state;
233                         pthread_cond_broadcast(&config_cond);
234 #ifdef USE_SYSTEMD
235                         do_sd_notify(old_state);
236 #endif
237                 }
238         }
239         pthread_cleanup_pop(1);
240         return rc;
241 }
242
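/*
 * get_multipath_config() takes the RCU read lock; every call must be
 * paired with put_multipath_config(), typically via
 * pthread_cleanup_push() so the lock is released even on cancellation.
 */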
243 struct config *get_multipath_config(void)
244 {
245         rcu_read_lock();
246         return rcu_dereference(multipath_conf);
247 }
248
249 void put_multipath_config(void *arg)
250 {
251         rcu_read_unlock();
252 }
253
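/*
 * Return 1 if the highest-priority path group (bestpg) differs from the
 * currently selected next path group (nextpg), optionally refreshing
 * path priorities first; maps with manual failback never request a
 * switch.
 */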
254 static int
255 need_switch_pathgroup (struct multipath * mpp, int refresh)
256 {
257         struct pathgroup * pgp;
258         struct path * pp;
259         unsigned int i, j;
260         struct config *conf;
261         int bestpg;
262
263         if (!mpp)
264                 return 0;
265
266         /*
267          * Refresh path priority values
268          */
269         if (refresh) {
270                 vector_foreach_slot (mpp->pg, pgp, i) {
271                         vector_foreach_slot (pgp->paths, pp, j) {
272                                 conf = get_multipath_config();
273                                 pthread_cleanup_push(put_multipath_config,
274                                                      conf);
275                                 pathinfo(pp, conf, DI_PRIO);
276                                 pthread_cleanup_pop(1);
277                         }
278                 }
279         }
280
281         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
282                 return 0;
283
284         bestpg = select_path_group(mpp);
285         if (mpp->pgfailback == -FAILBACK_MANUAL)
286                 return 0;
287
288         mpp->bestpg = bestpg;
289         if (mpp->bestpg != mpp->nextpg)
290                 return 1;
291
292         return 0;
293 }
294
295 static void
296 switch_pathgroup (struct multipath * mpp)
297 {
298         mpp->stat_switchgroup++;
299         dm_switchgroup(mpp->alias, mpp->bestpg);
300         condlog(2, "%s: switch to path group #%i",
301                  mpp->alias, mpp->bestpg);
302 }
303
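/*
 * Arm event monitoring for a map: either register it with the shared
 * dmevents polling code or start a dedicated per-map waiter thread.
 */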
304 static int
305 wait_for_events(struct multipath *mpp, struct vectors *vecs)
306 {
307         if (poll_dmevents)
308                 return watch_dmevents(mpp->alias);
309         else
310                 return start_waiter_thread(mpp, vecs);
311 }
312
313 static void
314 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs)
315 {
316         /* devices are automatically removed by the dmevent polling code,
317          * so they don't need to be manually removed here */
318         if (!poll_dmevents)
319                 stop_waiter_thread(mpp, vecs);
320         remove_map(mpp, vecs, PURGE_VEC);
321 }
322
323 static void
324 remove_maps_and_stop_waiters(struct vectors *vecs)
325 {
326         int i;
327         struct multipath * mpp;
328
329         if (!vecs)
330                 return;
331
332         if (!poll_dmevents) {
333                 vector_foreach_slot(vecs->mpvec, mpp, i)
334                         stop_waiter_thread(mpp, vecs);
335         }
336         else
337                 unwatch_all_dmevents();
338
339         remove_maps(vecs);
340 }
341
342 static void
343 set_multipath_wwid (struct multipath * mpp)
344 {
345         if (strlen(mpp->wwid))
346                 return;
347
348         dm_get_uuid(mpp->alias, mpp->wwid);
349 }
350
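/*
 * Reconcile the kernel's queue_if_no_path feature with the configured
 * no_path_retry policy; with a numeric no_path_retry value, recovery
 * mode is entered once queueing is active and no usable paths remain.
 */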
351 static void set_no_path_retry(struct multipath *mpp)
352 {
353         char is_queueing = 0;
354
355         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
356         if (mpp->features && strstr(mpp->features, "queue_if_no_path"))
357                 is_queueing = 1;
358
359         switch (mpp->no_path_retry) {
360         case NO_PATH_RETRY_UNDEF:
361                 break;
362         case NO_PATH_RETRY_FAIL:
363                 if (is_queueing)
364                         dm_queue_if_no_path(mpp->alias, 0);
365                 break;
366         case NO_PATH_RETRY_QUEUE:
367                 if (!is_queueing)
368                         dm_queue_if_no_path(mpp->alias, 1);
369                 break;
370         default:
371                 if (mpp->nr_active > 0) {
372                         mpp->retry_tick = 0;
373                         dm_queue_if_no_path(mpp->alias, 1);
374                 } else if (is_queueing && mpp->retry_tick == 0)
375                         enter_recovery_mode(mpp);
376                 break;
377         }
378 }
379
380 int __setup_multipath(struct vectors *vecs, struct multipath *mpp,
381                       int reset)
382 {
383         if (dm_get_info(mpp->alias, &mpp->dmi)) {
384                 /* Error accessing table */
385                 condlog(3, "%s: cannot access table", mpp->alias);
386                 goto out;
387         }
388
389         if (update_multipath_strings(mpp, vecs->pathvec, 1)) {
390                 condlog(0, "%s: failed to setup multipath", mpp->alias);
391                 goto out;
392         }
393
394         if (reset) {
395                 set_no_path_retry(mpp);
396                 if (VECTOR_SIZE(mpp->paths) != 0)
397                         dm_cancel_deferred_remove(mpp);
398         }
399
400         return 0;
401 out:
402         remove_map_and_stop_waiter(mpp, vecs);
403         return 1;
404 }
405
406 int update_multipath (struct vectors *vecs, char *mapname, int reset)
407 {
408         struct multipath *mpp;
409         struct pathgroup  *pgp;
410         struct path *pp;
411         int i, j;
412
413         mpp = find_mp_by_alias(vecs->mpvec, mapname);
414
415         if (!mpp) {
416                 condlog(3, "%s: multipath map not found", mapname);
417                 return 2;
418         }
419
420         if (__setup_multipath(vecs, mpp, reset))
421                 return 1; /* mpp freed in setup_multipath */
422                 return 1; /* mpp freed in __setup_multipath */
423         /*
424          * compare checkers states with DM states
425          */
426         vector_foreach_slot (mpp->pg, pgp, i) {
427                 vector_foreach_slot (pgp->paths, pp, j) {
428                         if (pp->dmstate != PSTATE_FAILED)
429                                 continue;
430
431                         if (pp->state != PATH_DOWN) {
432                                 struct config *conf;
433                                 int oldstate = pp->state;
434                                 int checkint;
435
436                                 conf = get_multipath_config();
437                                 checkint = conf->checkint;
438                                 put_multipath_config(conf);
439                                 condlog(2, "%s: mark as failed", pp->dev);
440                                 mpp->stat_path_failures++;
441                                 pp->state = PATH_DOWN;
442                                 if (oldstate == PATH_UP ||
443                                     oldstate == PATH_GHOST)
444                                         update_queue_mode_del_path(mpp);
445
446                                 /*
447                                  * if opportune,
448                                  * schedule the next check earlier
449                                  */
450                                 if (pp->tick > checkint)
451                                         pp->tick = checkint;
452                         }
453                 }
454         }
455         return 0;
456 }
457
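/*
 * Re-adopt paths and reload an existing map, retrying domap() up to
 * three times. For a newly created map (new_map != 0) any failure
 * removes the map again and returns 1.
 */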
458 static int
459 update_map (struct multipath *mpp, struct vectors *vecs, int new_map)
460 {
461         int retries = 3;
462         char params[PARAMS_SIZE] = {0};
463
464 retry:
465         condlog(4, "%s: updating new map", mpp->alias);
466         if (adopt_paths(vecs->pathvec, mpp)) {
467                 condlog(0, "%s: failed to adopt paths for new map update",
468                         mpp->alias);
469                 retries = -1;
470                 goto fail;
471         }
472         verify_paths(mpp, vecs);
473         mpp->action = ACT_RELOAD;
474
475         extract_hwe_from_path(mpp);
476         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
477                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
478                 retries = -1;
479                 goto fail;
480         }
481         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
482                 condlog(0, "%s: map_update sleep", mpp->alias);
483                 sleep(1);
484                 goto retry;
485         }
486         dm_lib_release();
487
488 fail:
489         if (new_map && (retries < 0 || wait_for_events(mpp, vecs))) {
490                 condlog(0, "%s: failed to create new map", mpp->alias);
491                 remove_map(mpp, vecs, 1);
492                 return 1;
493         }
494
495         if (setup_multipath(vecs, mpp))
496                 return 1;
497
498         sync_map_state(mpp);
499
500         if (retries < 0)
501                 condlog(0, "%s: failed reload in new map update", mpp->alias);
502         return 0;
503 }
504
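/*
 * Register a map that already exists in the kernel but is not yet known
 * to multipathd (e.g. found via a dm uevent): read its table and status
 * from device-mapper, add it to mpvec and bring it in sync via
 * update_map().
 */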
505 static struct multipath *
506 add_map_without_path (struct vectors *vecs, const char *alias)
507 {
508         struct multipath * mpp = alloc_multipath();
509         struct config *conf;
510
511         if (!mpp)
512                 return NULL;
513         if (!alias) {
514                 FREE(mpp);
515                 return NULL;
516         }
517
518         mpp->alias = STRDUP(alias);
519
520         if (dm_get_info(mpp->alias, &mpp->dmi)) {
521                 condlog(3, "%s: cannot access table", mpp->alias);
522                 goto out;
523         }
524         set_multipath_wwid(mpp);
525         conf = get_multipath_config();
526         mpp->mpe = find_mpe(conf->mptable, mpp->wwid);
527         put_multipath_config(conf);
528
529         if (update_multipath_table(mpp, vecs->pathvec, 1))
530                 goto out;
531         if (update_multipath_status(mpp))
532                 goto out;
533
534         if (!vector_alloc_slot(vecs->mpvec))
535                 goto out;
536
537         vector_set_slot(vecs->mpvec, mpp);
538
539         if (update_map(mpp, vecs, 1) != 0) /* map removed */
540                 return NULL;
541
542         return mpp;
543 out:
544         remove_map(mpp, vecs, PURGE_VEC);
545         return NULL;
546 }
547
548 static int
549 coalesce_maps(struct vectors *vecs, vector nmpv)
550 {
551         struct multipath * ompp;
552         vector ompv = vecs->mpvec;
553         unsigned int i, reassign_maps;
554         struct config *conf;
555
556         conf = get_multipath_config();
557         reassign_maps = conf->reassign_maps;
558         put_multipath_config(conf);
559         vector_foreach_slot (ompv, ompp, i) {
560                 condlog(3, "%s: coalesce map", ompp->alias);
561                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
562                         /*
563                          * remove all current maps not allowed by the
564                          * current configuration
565                          */
566                         if (dm_flush_map(ompp->alias)) {
567                                 condlog(0, "%s: unable to flush devmap",
568                                         ompp->alias);
569                                 /*
570                                  * may be just because the device is open
571                                  */
572                                 if (setup_multipath(vecs, ompp) != 0) {
573                                         i--;
574                                         continue;
575                                 }
576                                 if (!vector_alloc_slot(nmpv))
577                                         return 1;
578
579                                 vector_set_slot(nmpv, ompp);
580
581                                 vector_del_slot(ompv, i);
582                                 i--;
583                         }
584                         else {
585                                 dm_lib_release();
586                                 condlog(2, "%s devmap removed", ompp->alias);
587                         }
588                 } else if (reassign_maps) {
589                         condlog(3, "%s: Reassign existing device-mapper"
590                                 " devices", ompp->alias);
591                         dm_reassign(ompp->alias);
592                 }
593         }
594         return 0;
595 }
596
597 static void
598 sync_maps_state(vector mpvec)
599 {
600         unsigned int i;
601         struct multipath *mpp;
602
603         vector_foreach_slot (mpvec, mpp, i)
604                 sync_map_state(mpp);
605 }
606
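/*
 * Remove a map from device-mapper (the "nopaths" variant also handles
 * deferred removal) and drop multipathd's references to it. Returns 0
 * on success and the non-zero dm_flush_map* result otherwise.
 */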
607 static int
608 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
609 {
610         int r;
611
612         if (nopaths)
613                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
614         else
615                 r = dm_flush_map(mpp->alias);
616         /*
617          * on success, clear references to this map below so we can ignore
618          * the spurious uevent generated by the dm_flush_map call above
619          */
620         if (r) {
621                 /*
622                  * May not really be an error -- if the map was already flushed
623                  * from the device mapper by dmsetup(8) for instance.
624                  */
625                 if (r == 1)
626                         condlog(0, "%s: can't flush", mpp->alias);
627                 else {
628                         condlog(2, "%s: devmap deferred remove", mpp->alias);
629                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
630                 }
631                 return r;
632         }
633         else {
634                 dm_lib_release();
635                 condlog(2, "%s: map flushed", mpp->alias);
636         }
637
638         orphan_paths(vecs->pathvec, mpp);
639         remove_map_and_stop_waiter(mpp, vecs);
640
641         return 0;
642 }
643
644 static int
645 uev_add_map (struct uevent * uev, struct vectors * vecs)
646 {
647         char *alias;
648         int major = -1, minor = -1, rc;
649
650         condlog(3, "%s: add map (uevent)", uev->kernel);
651         alias = uevent_get_dm_name(uev);
652         if (!alias) {
653                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
654                 major = uevent_get_major(uev);
655                 minor = uevent_get_minor(uev);
656                 alias = dm_mapname(major, minor);
657                 if (!alias) {
658                         condlog(2, "%s: mapname not found for %d:%d",
659                                 uev->kernel, major, minor);
660                         return 1;
661                 }
662         }
663         pthread_cleanup_push(cleanup_lock, &vecs->lock);
664         lock(&vecs->lock);
665         pthread_testcancel();
666         rc = ev_add_map(uev->kernel, alias, vecs);
667         lock_cleanup_pop(vecs->lock);
668         FREE(alias);
669         return rc;
670 }
671
672 /*
673  * ev_add_map expects that the multipath device already exists in kernel
674  * before it is called. It just adds a device to multipathd or updates an
675  * existing device.
676  */
677 int
678 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
679 {
680         struct multipath * mpp;
681         int delayed_reconfig, reassign_maps;
682         struct config *conf;
683
684         if (!dm_is_mpath(alias)) {
685                 condlog(4, "%s: not a multipath map", alias);
686                 return 0;
687         }
688
689         mpp = find_mp_by_alias(vecs->mpvec, alias);
690
691         if (mpp) {
692                 if (mpp->wait_for_udev > 1) {
693                         condlog(2, "%s: performing delayed actions",
694                                 mpp->alias);
695                         if (update_map(mpp, vecs, 0))
696                                 /* setup_multipath removed the map */
697                                 return 1;
698                 }
699                 conf = get_multipath_config();
700                 delayed_reconfig = conf->delayed_reconfig;
701                 reassign_maps = conf->reassign_maps;
702                 put_multipath_config(conf);
703                 if (mpp->wait_for_udev) {
704                         mpp->wait_for_udev = 0;
705                         if (delayed_reconfig &&
706                             !need_to_delay_reconfig(vecs)) {
707                                 condlog(2, "reconfigure (delayed)");
708                                 set_config_state(DAEMON_CONFIGURE);
709                                 return 0;
710                         }
711                 }
712                 /*
713                  * Not really an error -- we generate our own uevent
714                  * if we create a multipath mapped device as a result
715                  * of uev_add_path
716                  */
717                 if (reassign_maps) {
718                         condlog(3, "%s: Reassign existing device-mapper devices",
719                                 alias);
720                         dm_reassign(alias);
721                 }
722                 return 0;
723         }
724         condlog(2, "%s: adding map", alias);
725
726         /*
727          * now we can register the map
728          */
729         if ((mpp = add_map_without_path(vecs, alias))) {
730                 sync_map_state(mpp);
731                 condlog(2, "%s: devmap %s registered", alias, dev);
732                 return 0;
733         } else {
734                 condlog(2, "%s: ev_add_map failed", dev);
735                 return 1;
736         }
737 }
738
739 static int
740 uev_remove_map (struct uevent * uev, struct vectors * vecs)
741 {
742         char *alias;
743         int minor;
744         struct multipath *mpp;
745
746         condlog(3, "%s: remove map (uevent)", uev->kernel);
747         alias = uevent_get_dm_name(uev);
748         if (!alias) {
749                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
750                 return 0;
751         }
752         minor = uevent_get_minor(uev);
753
754         pthread_cleanup_push(cleanup_lock, &vecs->lock);
755         lock(&vecs->lock);
756         pthread_testcancel();
757         mpp = find_mp_by_minor(vecs->mpvec, minor);
758
759         if (!mpp) {
760                 condlog(2, "%s: devmap not registered, can't remove",
761                         uev->kernel);
762                 goto out;
763         }
764         if (strcmp(mpp->alias, alias)) {
765                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
766                         mpp->alias, mpp->dmi->minor, minor);
767                 goto out;
768         }
769
770         orphan_paths(vecs->pathvec, mpp);
771         remove_map_and_stop_waiter(mpp, vecs);
772 out:
773         lock_cleanup_pop(vecs->lock);
774         FREE(alias);
775         return 0;
776 }
777
778 /* Called from CLI handler */
779 int
780 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
781 {
782         struct multipath * mpp;
783
784         mpp = find_mp_by_minor(vecs->mpvec, minor);
785
786         if (!mpp) {
787                 condlog(2, "%s: devmap not registered, can't remove",
788                         devname);
789                 return 1;
790         }
791         if (strcmp(mpp->alias, alias)) {
792                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
793                         mpp->alias, mpp->dmi->minor, minor);
794                 return 1;
795         }
796         return flush_map(mpp, vecs, 0);
797 }
798
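/*
 * Handle "add" uevents for path devices: a path that is already in
 * pathvec but has no map and no WWID is reinitialized (or dropped if it
 * is now blacklisted); otherwise a new struct path is allocated, stored
 * and passed to ev_add_path().
 */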
799 static int
800 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
801 {
802         struct path *pp;
803         int ret = 0, i;
804         struct config *conf;
805
806         condlog(3, "%s: add path (uevent)", uev->kernel);
807         if (strstr(uev->kernel, "..") != NULL) {
808                 /*
809                  * Don't allow relative device names in the pathvec
810                  */
811                 condlog(0, "%s: path name is invalid", uev->kernel);
812                 return 1;
813         }
814
815         pthread_cleanup_push(cleanup_lock, &vecs->lock);
816         lock(&vecs->lock);
817         pthread_testcancel();
818         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
819         if (pp) {
820                 int r;
821
822                 condlog(3, "%s: spurious uevent, path already in pathvec",
823                         uev->kernel);
824                 if (!pp->mpp && !strlen(pp->wwid)) {
825                         condlog(3, "%s: reinitialize path", uev->kernel);
826                         udev_device_unref(pp->udev);
827                         pp->udev = udev_device_ref(uev->udev);
828                         conf = get_multipath_config();
829                         pthread_cleanup_push(put_multipath_config, conf);
830                         r = pathinfo(pp, conf,
831                                      DI_ALL | DI_BLACKLIST);
832                         pthread_cleanup_pop(1);
833                         if (r == PATHINFO_OK)
834                                 ret = ev_add_path(pp, vecs, need_do_map);
835                         else if (r == PATHINFO_SKIPPED) {
836                                 condlog(3, "%s: remove blacklisted path",
837                                         uev->kernel);
838                                 i = find_slot(vecs->pathvec, (void *)pp);
839                                 if (i != -1)
840                                         vector_del_slot(vecs->pathvec, i);
841                                 free_path(pp);
842                         } else {
843                                 condlog(0, "%s: failed to reinitialize path",
844                                         uev->kernel);
845                                 ret = 1;
846                         }
847                 }
848         }
849         lock_cleanup_pop(vecs->lock);
850         if (pp)
851                 return ret;
852
853         /*
854          * get path vital state
855          */
856         conf = get_multipath_config();
857         pthread_cleanup_push(put_multipath_config, conf);
858         ret = alloc_path_with_pathinfo(conf, uev->udev,
859                                        uev->wwid, DI_ALL, &pp);
860         pthread_cleanup_pop(1);
861         if (!pp) {
862                 if (ret == PATHINFO_SKIPPED)
863                         return 0;
864                 condlog(3, "%s: failed to get path info", uev->kernel);
865                 return 1;
866         }
867         pthread_cleanup_push(cleanup_lock, &vecs->lock);
868         lock(&vecs->lock);
869         pthread_testcancel();
870         ret = store_path(vecs->pathvec, pp);
871         if (!ret) {
872                 conf = get_multipath_config();
873                 pp->checkint = conf->checkint;
874                 put_multipath_config(conf);
875                 ret = ev_add_path(pp, vecs, need_do_map);
876         } else {
877                 condlog(0, "%s: failed to store path info, "
878                         "dropping event",
879                         uev->kernel);
880                 free_path(pp);
881                 ret = 1;
882         }
883         lock_cleanup_pop(vecs->lock);
884         return ret;
885 }
886
887 /*
888  * returns:
889  * 0: added
890  * 1: error
891  */
892 int
893 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
894 {
895         struct multipath * mpp;
896         char params[PARAMS_SIZE] = {0};
897         int retries = 3;
898         int start_waiter = 0;
899         int ret;
900
901         /*
902          * need path UID to go any further
903          */
904         if (strlen(pp->wwid) == 0) {
905                 condlog(0, "%s: failed to get path uid", pp->dev);
906                 goto fail; /* leave path added to pathvec */
907         }
908         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
909         if (mpp && mpp->wait_for_udev &&
910             (pathcount(mpp, PATH_UP) > 0 ||
911              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
912               mpp->ghost_delay_tick <= 0))) {
913                 /* if wait_for_udev is set and valid paths exist */
914                 condlog(3, "%s: delaying path addition until %s is fully initialized",
915                         pp->dev, mpp->alias);
916                 mpp->wait_for_udev = 2;
917                 orphan_path(pp, "waiting for create to complete");
918                 return 0;
919         }
920
921         pp->mpp = mpp;
922 rescan:
923         if (mpp) {
924                 if (pp->size && mpp->size != pp->size) {
925                         condlog(0, "%s: failed to add new path %s, "
926                                 "device size mismatch",
927                                 mpp->alias, pp->dev);
928                         int i = find_slot(vecs->pathvec, (void *)pp);
929                         if (i != -1)
930                                 vector_del_slot(vecs->pathvec, i);
931                         free_path(pp);
932                         return 1;
933                 }
934
935                 condlog(4,"%s: adopting all paths for path %s",
936                         mpp->alias, pp->dev);
937                 if (adopt_paths(vecs->pathvec, mpp))
938                         goto fail; /* leave path added to pathvec */
939
940                 verify_paths(mpp, vecs);
941                 mpp->action = ACT_RELOAD;
942                 extract_hwe_from_path(mpp);
943         } else {
944                 if (!should_multipath(pp, vecs->pathvec, vecs->mpvec)) {
945                         orphan_path(pp, "only one path");
946                         return 0;
947                 }
948                 condlog(4,"%s: creating new map", pp->dev);
949                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
950                         mpp->action = ACT_CREATE;
951                         /*
952                          * We don't depend on ACT_CREATE, as domap will
953                          * set it to ACT_NOTHING when complete.
954                          */
955                         start_waiter = 1;
956                 }
957                 if (!start_waiter)
958                         goto fail; /* leave path added to pathvec */
959         }
960
961         /* persistent reservation check */
962         mpath_pr_event_handle(pp);
963
964         if (!need_do_map)
965                 return 0;
966
967         if (!dm_map_present(mpp->alias)) {
968                 mpp->action = ACT_CREATE;
969                 start_waiter = 1;
970         }
971         /*
972          * push the map to the device-mapper
973          */
974         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
975                 condlog(0, "%s: failed to setup map for addition of new "
976                         "path %s", mpp->alias, pp->dev);
977                 goto fail_map;
978         }
979         /*
980          * reload the map for the multipath mapped device
981          */
982 retry:
983         ret = domap(mpp, params, 1);
984         if (ret <= 0) {
985                 if (ret < 0 && retries-- > 0) {
986                         condlog(0, "%s: retry domap for addition of new "
987                                 "path %s", mpp->alias, pp->dev);
988                         sleep(1);
989                         goto retry;
990                 }
991                 condlog(0, "%s: failed in domap for addition of new "
992                         "path %s", mpp->alias, pp->dev);
993                 /*
994                  * deal with asynchronous uevents :((
995                  */
996                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
997                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
998                         sleep(1);
999                         update_mpp_paths(mpp, vecs->pathvec);
1000                         goto rescan;
1001                 }
1002                 else if (mpp->action == ACT_RELOAD)
1003                         condlog(0, "%s: giving up reload", mpp->alias);
1004                 else
1005                         goto fail_map;
1006         }
1007         dm_lib_release();
1008
1009         if ((mpp->action == ACT_CREATE ||
1010              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
1011             wait_for_events(mpp, vecs))
1012                         goto fail_map;
1013
1014         /*
1015          * update our state from kernel regardless of create or reload
1016          */
1017         if (setup_multipath(vecs, mpp))
1018                 goto fail; /* if setup_multipath fails, it removes the map */
1019
1020         sync_map_state(mpp);
1021
1022         if (retries >= 0) {
1023                 condlog(2, "%s [%s]: path added to devmap %s",
1024                         pp->dev, pp->dev_t, mpp->alias);
1025                 return 0;
1026         } else
1027                 goto fail;
1028
1029 fail_map:
1030         remove_map(mpp, vecs, 1);
1031 fail:
1032         orphan_path(pp, "failed to add path");
1033         return 1;
1034 }
1035
1036 static int
1037 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
1038 {
1039         struct path *pp;
1040         int ret;
1041
1042         condlog(3, "%s: remove path (uevent)", uev->kernel);
1043         delete_foreign(uev->udev);
1044
1045         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1046         lock(&vecs->lock);
1047         pthread_testcancel();
1048         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1049         if (pp)
1050                 ret = ev_remove_path(pp, vecs, need_do_map);
1051         lock_cleanup_pop(vecs->lock);
1052         if (!pp) {
1053                 /* Not an error; path might have been purged earlier */
1054                 condlog(0, "%s: path already removed", uev->kernel);
1055                 return 0;
1056         }
1057         return ret;
1058 }
1059
1060 int
1061 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
1062 {
1063         struct multipath * mpp;
1064         int i, retval = 0;
1065         char params[PARAMS_SIZE] = {0};
1066
1067         /*
1068          * avoid referring to the map of an orphaned path
1069          */
1070         if ((mpp = pp->mpp)) {
1071                 /*
1072                  * transform the mp->pg vector of vectors of paths
1073                  * into a mp->params string to feed the device-mapper
1074                  */
1075                 if (update_mpp_paths(mpp, vecs->pathvec)) {
1076                         condlog(0, "%s: failed to update paths",
1077                                 mpp->alias);
1078                         goto fail;
1079                 }
1080
1081                 /*
1082                  * Make sure mpp->hwe doesn't point to freed memory
1083                  * We call extract_hwe_from_path() below to restore mpp->hwe
1084                  */
1085                 if (mpp->hwe == pp->hwe)
1086                         mpp->hwe = NULL;
1087
1088                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
1089                         vector_del_slot(mpp->paths, i);
1090
1091                 /*
1092                  * remove the map IF removing the last path
1093                  */
1094                 if (VECTOR_SIZE(mpp->paths) == 0) {
1095                         char alias[WWID_SIZE];
1096
1097                         /*
1098                          * flush_map will fail if the device is open
1099                          */
1100                         strlcpy(alias, mpp->alias, WWID_SIZE);
1101                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
1102                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
1103                                 mpp->retry_tick = 0;
1104                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
1105                                 mpp->disable_queueing = 1;
1106                                 mpp->stat_map_failures++;
1107                                 dm_queue_if_no_path(mpp->alias, 0);
1108                         }
1109                         if (!flush_map(mpp, vecs, 1)) {
1110                                 condlog(2, "%s: removed map after"
1111                                         " removing all paths",
1112                                         alias);
1113                                 retval = 0;
1114                                 goto out;
1115                         }
1116                         /*
1117                          * Not an error, continue
1118                          */
1119                 }
1120
1121                 if (mpp->hwe == NULL)
1122                         extract_hwe_from_path(mpp);
1123
1124                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1125                         condlog(0, "%s: failed to setup map for"
1126                                 " removal of path %s", mpp->alias, pp->dev);
1127                         goto fail;
1128                 }
1129
1130                 if (mpp->wait_for_udev) {
1131                         mpp->wait_for_udev = 2;
1132                         goto out;
1133                 }
1134
1135                 if (!need_do_map)
1136                         goto out;
1137                 /*
1138                  * reload the map
1139                  */
1140                 mpp->action = ACT_RELOAD;
1141                 if (domap(mpp, params, 1) <= 0) {
1142                         condlog(0, "%s: failed in domap for "
1143                                 "removal of path %s",
1144                                 mpp->alias, pp->dev);
1145                         retval = 1;
1146                 } else {
1147                         /*
1148                          * update our state from kernel
1149                          */
1150                         if (setup_multipath(vecs, mpp))
1151                                 return 1;
1152                         sync_map_state(mpp);
1153
1154                         condlog(2, "%s [%s]: path removed from map %s",
1155                                 pp->dev, pp->dev_t, mpp->alias);
1156                 }
1157         }
1158
1159 out:
1160         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
1161                 vector_del_slot(vecs->pathvec, i);
1162
1163         free_path(pp);
1164
1165         return retval;
1166
1167 fail:
1168         remove_map_and_stop_waiter(mpp, vecs);
1169         return 1;
1170 }
1171
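/*
 * Handle "change" uevents on path devices: detect WWID changes (failing
 * the path when disable_changed_wwids is set), refresh sysfs state, and
 * reload the map when the event carries a disk read-only (ro) setting.
 * Paths still waiting for udev initialization are re-added via
 * uev_add_path().
 */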
1172 static int
1173 uev_update_path (struct uevent *uev, struct vectors * vecs)
1174 {
1175         int ro, retval = 0, rc;
1176         struct path * pp;
1177         struct config *conf;
1178         int disable_changed_wwids;
1179         int needs_reinit = 0;
1180
1181         switch ((rc = change_foreign(uev->udev))) {
1182         case FOREIGN_OK:
1183                 /* known foreign path, ignore event */
1184                 return 0;
1185         case FOREIGN_IGNORED:
1186                 break;
1187         case FOREIGN_ERR:
1188                 condlog(3, "%s: error in change_foreign", __func__);
1189                 break;
1190         default:
1191                 condlog(1, "%s: unexpected return code %d from change_foreign",
1192                         __func__, rc);
1193                 break;
1194         }
1195
1196         conf = get_multipath_config();
1197         disable_changed_wwids = conf->disable_changed_wwids;
1198         put_multipath_config(conf);
1199
1200         ro = uevent_get_disk_ro(uev);
1201
1202         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1203         lock(&vecs->lock);
1204         pthread_testcancel();
1205
1206         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
1207         if (pp) {
1208                 struct multipath *mpp = pp->mpp;
1209                 char wwid[WWID_SIZE];
1210
1211                 strcpy(wwid, pp->wwid);
1212                 get_uid(pp, pp->state, uev->udev);
1213
1214                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
1215                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
1216                                 uev->kernel, wwid, pp->wwid,
1217                                 (disable_changed_wwids ? "disallowing" :
1218                                  "continuing"));
1219                         if (disable_changed_wwids &&
1220                             (strlen(wwid) || pp->wwid_changed)) {
1221                                 strcpy(pp->wwid, wwid);
1222                                 if (!pp->wwid_changed) {
1223                                         pp->wwid_changed = 1;
1224                                         pp->tick = 1;
1225                                         if (pp->mpp)
1226                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
1227                                 }
1228                                 goto out;
1229                         } else if (!disable_changed_wwids)
1230                                 strcpy(pp->wwid, wwid);
1231                         else
1232                                 pp->wwid_changed = 0;
1233                 } else {
1234                         udev_device_unref(pp->udev);
1235                         pp->udev = udev_device_ref(uev->udev);
1236                         conf = get_multipath_config();
1237                         pthread_cleanup_push(put_multipath_config, conf);
1238                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
1239                                 condlog(1, "%s: pathinfo failed after change uevent",
1240                                         uev->kernel);
1241                         pthread_cleanup_pop(1);
1242                 }
1243
1244                 if (pp->initialized == INIT_REQUESTED_UDEV)
1245                         needs_reinit = 1;
1246                 else if (mpp && ro >= 0) {
1247                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1248
1249                         if (mpp->wait_for_udev)
1250                                 mpp->wait_for_udev = 2;
1251                         else {
1252                                 if (ro == 1)
1253                                         pp->mpp->force_readonly = 1;
1254                                 retval = reload_map(vecs, mpp, 0, 1);
1255                                 pp->mpp->force_readonly = 0;
1256                                 condlog(2, "%s: map %s reloaded (retval %d)",
1257                                         uev->kernel, mpp->alias, retval);
1258                         }
1259                 }
1260         }
1261 out:
1262         lock_cleanup_pop(vecs->lock);
1263         if (!pp) {
1264                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1265                 if (uev->udev) {
1266                         int flag = DI_SYSFS | DI_WWID;
1267
1268                         conf = get_multipath_config();
1269                         pthread_cleanup_push(put_multipath_config, conf);
1270                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1271                         pthread_cleanup_pop(1);
1272
1273                         if (retval == PATHINFO_SKIPPED) {
1274                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1275                                 return 0;
1276                         }
1277                 }
1278
1279                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1280         }
1281         if (needs_reinit)
1282                 retval = uev_add_path(uev, vecs, 1);
1283         return retval;
1284 }
1285
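/*
 * For dm "change" uevents whose dm action is "PATH_FAILED", look up the
 * failed path by the DM_PATH device number and hand it to the
 * io_err_stat code, which does the path IO error accounting.
 */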
1286 static int
1287 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1288 {
1289         char *action = NULL, *devt = NULL;
1290         struct path *pp;
1291         int r = 1;
1292
1293         action = uevent_get_dm_action(uev);
1294         if (!action)
1295                 return 1;
1296         if (strncmp(action, "PATH_FAILED", 11))
1297                 goto out;
1298         devt = uevent_get_dm_path(uev);
1299         if (!devt) {
1300                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1301                 goto out;
1302         }
1303
1304         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1305         lock(&vecs->lock);
1306         pthread_testcancel();
1307         pp = find_path_by_devt(vecs->pathvec, devt);
1308         if (!pp)
1309                 goto out_lock;
1310         r = io_err_stat_handle_pathfail(pp);
1311         if (r)
1312                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1313                                 pp->dev);
1314 out_lock:
1315         lock_cleanup_pop(vecs->lock);
1316         FREE(devt);
1317         FREE(action);
1318         return r;
1319 out:
1320         FREE(action);
1321         return 1;
1322 }
1323
1324 static int
1325 map_discovery (struct vectors * vecs)
1326 {
1327         struct multipath * mpp;
1328         unsigned int i;
1329
1330         if (dm_get_maps(vecs->mpvec))
1331                 return 1;
1332
1333         vector_foreach_slot (vecs->mpvec, mpp, i)
1334                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1335                     update_multipath_status(mpp)) {
1336                         remove_map(mpp, vecs, 1);
1337                         i--;
1338                 }
1339
1340         return 0;
1341 }
1342
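/*
 * Callback for the unix socket listener: non-root clients may only run
 * "list"/"show" commands; parse_cmd() results are mapped to "timeout",
 * "fail" or "ok" replies when the handler has not provided its own.
 */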
1343 int
1344 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1345                 void * trigger_data)
1346 {
1347         struct vectors * vecs;
1348         int r;
1349
1350         *reply = NULL;
1351         *len = 0;
1352         vecs = (struct vectors *)trigger_data;
1353
1354         if ((str != NULL) && (is_root == false) &&
1355             (strncmp(str, "list", strlen("list")) != 0) &&
1356             (strncmp(str, "show", strlen("show")) != 0)) {
1357                 *reply = STRDUP("permission denied: need to be root");
1358                 if (*reply)
1359                         *len = strlen(*reply) + 1;
1360                 return 1;
1361         }
1362
1363         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1364
1365         if (r > 0) {
1366                 if (r == ETIMEDOUT)
1367                         *reply = STRDUP("timeout\n");
1368                 else
1369                         *reply = STRDUP("fail\n");
1370                 if (*reply)
1371                         *len = strlen(*reply) + 1;
1372                 r = 1;
1373         }
1374         else if (!r && *len == 0) {
1375                 *reply = STRDUP("ok\n");
1376                 if (*reply)
1377                         *len = strlen(*reply) + 1;
1378                 r = 0;
1379         }
1380         /* else if (r < 0) leave *reply alone */
1381
1382         return r;
1383 }
1384
1385 int
1386 uev_trigger (struct uevent * uev, void * trigger_data)
1387 {
1388         int r = 0;
1389         struct vectors * vecs;
1390         struct uevent *merge_uev, *tmp;
1391
1392         vecs = (struct vectors *)trigger_data;
1393
1394         pthread_cleanup_push(config_cleanup, NULL);
1395         pthread_mutex_lock(&config_lock);
1396         if (running_state != DAEMON_IDLE &&
1397             running_state != DAEMON_RUNNING)
1398                 pthread_cond_wait(&config_cond, &config_lock);
1399         pthread_cleanup_pop(1);
1400
1401         if (running_state == DAEMON_SHUTDOWN)
1402                 return 0;
1403
1404         /*
1405          * device map event
1406          * Add events are ignored here as the tables
1407          * are not fully initialised then.
1408          */
1409         if (!strncmp(uev->kernel, "dm-", 3)) {
1410                 if (!uevent_is_mpath(uev)) {
1411                         if (!strncmp(uev->action, "change", 6))
1412                                 (void)add_foreign(uev->udev);
1413                         else if (!strncmp(uev->action, "remove", 6))
1414                                 (void)delete_foreign(uev->udev);
1415                         goto out;
1416                 }
1417                 if (!strncmp(uev->action, "change", 6)) {
1418                         r = uev_add_map(uev, vecs);
1419
1420                         /*
1421                          * the kernel-side dm-mpath issues a PATH_FAILED event
1422                          * when it encounters a path IO error. It is reason-
1423                          * able be the entry of path IO error accounting pro-
1424                          * cess.
1425                          */
1426                         uev_pathfail_check(uev, vecs);
1427                 } else if (!strncmp(uev->action, "remove", 6)) {
1428                         r = uev_remove_map(uev, vecs);
1429                 }
1430                 goto out;
1431         }
1432
1433         /*
1434          * path add/remove/change event, add/remove maybe merged
1435          */
1436         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1437                 if (!strncmp(merge_uev->action, "add", 3))
1438                         r += uev_add_path(merge_uev, vecs, 0);
1439                 if (!strncmp(merge_uev->action, "remove", 6))
1440                         r += uev_remove_path(merge_uev, vecs, 0);
1441         }
1442
1443         if (!strncmp(uev->action, "add", 3))
1444                 r += uev_add_path(uev, vecs, 1);
1445         if (!strncmp(uev->action, "remove", 6))
1446                 r += uev_remove_path(uev, vecs, 1);
1447         if (!strncmp(uev->action, "change", 6))
1448                 r += uev_update_path(uev, vecs);
1449
1450 out:
1451         return r;
1452 }
1453
1454 static void rcu_unregister(void *param)
1455 {
1456         rcu_unregister_thread();
1457 }
1458
1459 static void *
1460 ueventloop (void * ap)
1461 {
1462         struct udev *udev = ap;
1463
1464         pthread_cleanup_push(rcu_unregister, NULL);
1465         rcu_register_thread();
1466         if (uevent_listen(udev))
1467                 condlog(0, "error starting uevent listener");
1468         pthread_cleanup_pop(1);
1469         return NULL;
1470 }
1471
1472 static void *
1473 uevqloop (void * ap)
1474 {
1475         pthread_cleanup_push(rcu_unregister, NULL);
1476         rcu_register_thread();
1477         if (uevent_dispatch(&uev_trigger, ap))
1478                 condlog(0, "error starting uevent dispatcher");
1479         pthread_cleanup_pop(1);
1480         return NULL;
1481 }
1482 static void *
1483 uxlsnrloop (void * ap)
1484 {
1485         if (cli_init()) {
1486                 condlog(1, "Failed to init uxsock listener");
1487                 return NULL;
1488         }
1489         pthread_cleanup_push(rcu_unregister, NULL);
1490         rcu_register_thread();
1491         set_handler_callback(LIST+PATHS, cli_list_paths);
1492         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1493         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1494         set_handler_callback(LIST+PATH, cli_list_path);
1495         set_handler_callback(LIST+MAPS, cli_list_maps);
1496         set_handler_callback(LIST+STATUS, cli_list_status);
1497         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1498         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1499         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1500         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1501         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1502         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1503         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1504         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1505         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1506         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1507         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1508         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1509         set_handler_callback(LIST+CONFIG+LOCAL, cli_list_config_local);
1510         set_handler_callback(LIST+CONFIG, cli_list_config);
1511         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1512         set_handler_callback(LIST+DEVICES, cli_list_devices);
1513         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1514         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1515         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1516         set_handler_callback(ADD+PATH, cli_add_path);
1517         set_handler_callback(DEL+PATH, cli_del_path);
1518         set_handler_callback(ADD+MAP, cli_add_map);
1519         set_handler_callback(DEL+MAP, cli_del_map);
1520         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1521         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1522         set_handler_callback(SUSPEND+MAP, cli_suspend);
1523         set_handler_callback(RESUME+MAP, cli_resume);
1524         set_handler_callback(RESIZE+MAP, cli_resize);
1525         set_handler_callback(RELOAD+MAP, cli_reload);
1526         set_handler_callback(RESET+MAP, cli_reassign);
1527         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1528         set_handler_callback(FAIL+PATH, cli_fail);
1529         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1530         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1531         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1532         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1533         set_unlocked_handler_callback(QUIT, cli_quit);
1534         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1535         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1536         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1537         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1538         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1539         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1540         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1541         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1542         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1543
1544         umask(077);
1545         uxsock_listen(&uxsock_trigger, ap);
1546         pthread_cleanup_pop(1);
1547         return NULL;
1548 }
1549
1550 void
1551 exit_daemon (void)
1552 {
1553         post_config_state(DAEMON_SHUTDOWN);
1554 }
1555
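/*
 * Tell the kernel to fail this path in its multipath map; if del_active
 * is set, also account for one less active path (possibly changing the
 * map's queueing mode).
 */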
1556 static void
1557 fail_path (struct path * pp, int del_active)
1558 {
1559         if (!pp->mpp)
1560                 return;
1561
1562         condlog(2, "checker failed path %s in map %s",
1563                  pp->dev_t, pp->mpp->alias);
1564
1565         dm_fail_path(pp->mpp->alias, pp->dev_t);
1566         if (del_active)
1567                 update_queue_mode_del_path(pp->mpp);
1568 }
1569
1570 /*
1571  * caller must have locked the path list before calling this function
1572  */
1573 static int
1574 reinstate_path (struct path * pp, int add_active)
1575 {
1576         int ret = 0;
1577
1578         if (!pp->mpp)
1579                 return 0;
1580
1581         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1582                 condlog(0, "%s: reinstate failed", pp->dev_t);
1583                 ret = 1;
1584         } else {
1585                 condlog(2, "%s: reinstated", pp->dev_t);
1586                 if (add_active)
1587                         update_queue_mode_add_path(pp->mpp);
1588         }
1589         return ret;
1590 }
1591
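/*
 * Re-enable the path group this path belongs to if the kernel currently
 * has it disabled.
 */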
1592 static void
1593 enable_group(struct path * pp)
1594 {
1595         struct pathgroup * pgp;
1596
1597         /*
1598          * if path is added through uev_add_path, pgindex can be unset.
1599          * next update_strings() will set it, upon map reload event.
1600          *
1601          * we can safely return here, because upon map reload, all
1602          * PGs will be enabled.
1603          */
1604         if (!pp->mpp->pg || !pp->pgindex)
1605                 return;
1606
1607         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1608
1609         if (pgp->status == PGSTATE_DISABLED) {
1610                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1611                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1612         }
1613 }
1614
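/*
 * Drop multipath entries whose device-mapper maps no longer exist.
 */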
1615 static void
1616 mpvec_garbage_collector (struct vectors * vecs)
1617 {
1618         struct multipath * mpp;
1619         unsigned int i;
1620
1621         if (!vecs->mpvec)
1622                 return;
1623
1624         vector_foreach_slot (vecs->mpvec, mpp, i) {
1625                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1626                         condlog(2, "%s: remove dead map", mpp->alias);
1627                         remove_map_and_stop_waiter(mpp, vecs);
1628                         i--;
1629                 }
1630         }
1631 }
1632
1633 /* This is called after a path has started working again. If the multipath
1634  * device for this path uses the followover failback type, and this is the
1635  * best pathgroup, and this is the first path in the pathgroup to come back
1636  * up, then switch to this pathgroup */
1637 static int
1638 followover_should_failback(struct path * pp)
1639 {
1640         struct pathgroup * pgp;
1641         struct path *pp1;
1642         int i;
1643
1644         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1645             !pp->mpp->pg || !pp->pgindex ||
1646             pp->pgindex != pp->mpp->bestpg)
1647                 return 0;
1648
1649         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1650         vector_foreach_slot(pgp->paths, pp1, i) {
1651                 if (pp1 == pp)
1652                         continue;
1653                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1654                         return 0;
1655         }
1656         return 1;
1657 }
1658
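/*
 * Count down the per-map uevent wait timers. When a map gives up waiting
 * for its creation uevent, clear wait_for_udev (reloading the map first
 * if needed) and, with delayed_reconfig set and no map still waiting,
 * trigger the postponed reconfigure.
 */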
1659 static void
1660 missing_uev_wait_tick(struct vectors *vecs)
1661 {
1662         struct multipath * mpp;
1663         unsigned int i;
1664         int timed_out = 0, delayed_reconfig;
1665         struct config *conf;
1666
1667         vector_foreach_slot (vecs->mpvec, mpp, i) {
1668                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1669                         timed_out = 1;
1670                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1671                         if (mpp->wait_for_udev > 1 &&
1672                             update_map(mpp, vecs, 0)) {
1673                                 /* update_map removed map */
1674                                 i--;
1675                                 continue;
1676                         }
1677                         mpp->wait_for_udev = 0;
1678                 }
1679         }
1680
1681         conf = get_multipath_config();
1682         delayed_reconfig = conf->delayed_reconfig;
1683         put_multipath_config(conf);
1684         if (timed_out && delayed_reconfig &&
1685             !need_to_delay_reconfig(vecs)) {
1686                 condlog(2, "reconfigure (delayed)");
1687                 set_config_state(DAEMON_CONFIGURE);
1688         }
1689 }
1690
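/*
 * Count down the ghost_delay timers. When a map has waited too long for
 * an active path, force a map reload so its ghost paths can be used.
 */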
1691 static void
1692 ghost_delay_tick(struct vectors *vecs)
1693 {
1694         struct multipath * mpp;
1695         unsigned int i;
1696
1697         vector_foreach_slot (vecs->mpvec, mpp, i) {
1698                 if (mpp->ghost_delay_tick <= 0)
1699                         continue;
1700                 if (--mpp->ghost_delay_tick <= 0) {
1701                         condlog(0, "%s: timed out waiting for active path",
1702                                 mpp->alias);
1703                         mpp->force_udev_reload = 1;
1704                         if (update_map(mpp, vecs, 0) != 0) {
1705                                 /* update_map removed map */
1706                                 i--;
1707                                 continue;
1708                         }
1709                 }
1710         }
1711 }
1712
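/*
 * Count down the deferred failback timers; when one expires and a better
 * path group is available, switch to it.
 */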
1713 static void
1714 defered_failback_tick (vector mpvec)
1715 {
1716         struct multipath * mpp;
1717         unsigned int i;
1718
1719         vector_foreach_slot (mpvec, mpp, i) {
1720                 /*
1721                  * the deferred failback is getting closer
1722                  */
1723                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1724                         mpp->failback_tick--;
1725
1726                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1727                                 switch_pathgroup(mpp);
1728                 }
1729         }
1730 }
1731
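/*
 * For maps that are queueing I/O without an active path, count down the
 * remaining retries and disable queueing once they are used up.
 */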
1732 static void
1733 retry_count_tick(vector mpvec)
1734 {
1735         struct multipath *mpp;
1736         unsigned int i;
1737
1738         vector_foreach_slot (mpvec, mpp, i) {
1739                 if (mpp->retry_tick > 0) {
1740                         mpp->stat_total_queueing_time++;
1741                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1742                         if(--mpp->retry_tick == 0) {
1743                                 mpp->stat_map_failures++;
1744                                 dm_queue_if_no_path(mpp->alias, 0);
1745                                 condlog(2, "%s: Disable queueing", mpp->alias);
1746                         }
1747                 }
1748         }
1749 }
1750
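/*
 * Refresh path priorities via pathinfo(DI_PRIO): all paths of the map if
 * refresh_all is set, otherwise just this path (unless it is down).
 * Returns 1 if any priority changed, 0 otherwise.
 */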
1751 int update_prio(struct path *pp, int refresh_all)
1752 {
1753         int oldpriority;
1754         struct path *pp1;
1755         struct pathgroup * pgp;
1756         int i, j, changed = 0;
1757         struct config *conf;
1758
1759         if (refresh_all) {
1760                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1761                         vector_foreach_slot (pgp->paths, pp1, j) {
1762                                 oldpriority = pp1->priority;
1763                                 conf = get_multipath_config();
1764                                 pthread_cleanup_push(put_multipath_config,
1765                                                      conf);
1766                                 pathinfo(pp1, conf, DI_PRIO);
1767                                 pthread_cleanup_pop(1);
1768                                 if (pp1->priority != oldpriority)
1769                                         changed = 1;
1770                         }
1771                 }
1772                 return changed;
1773         }
1774         oldpriority = pp->priority;
1775         conf = get_multipath_config();
1776         pthread_cleanup_push(put_multipath_config, conf);
1777         if (pp->state != PATH_DOWN)
1778                 pathinfo(pp, conf, DI_PRIO);
1779         pthread_cleanup_pop(1);
1780
1781         if (pp->priority == oldpriority)
1782                 return 0;
1783         return 1;
1784 }
1785
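/*
 * Regroup the map's paths and reload its device-mapper table, then
 * resynchronize the daemon's view of the map. Returns 1 on failure.
 */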
1786 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1787 {
1788         if (reload_map(vecs, mpp, refresh, 1))
1789                 return 1;
1790
1791         dm_lib_release();
1792         if (setup_multipath(vecs, mpp) != 0)
1793                 return 1;
1794         sync_map_state(mpp);
1795
1796         return 0;
1797 }
1798
1799 /*
1800  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1801  * and '0' otherwise
1802  */
1803 int
1804 check_path (struct vectors * vecs, struct path * pp, int ticks)
1805 {
1806         int newstate;
1807         int new_path_up = 0;
1808         int chkr_new_path_up = 0;
1809         int add_active;
1810         int disable_reinstate = 0;
1811         int oldchkrstate = pp->chkrstate;
1812         int retrigger_tries, checkint;
1813         struct config *conf;
1814         int ret;
1815
1816         if ((pp->initialized == INIT_OK ||
1817              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1818                 return 0;
1819
1820         if (pp->tick)
1821                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1822         if (pp->tick)
1823                 return 0; /* don't check this path yet */
1824
1825         conf = get_multipath_config();
1826         retrigger_tries = conf->retrigger_tries;
1827         checkint = conf->checkint;
1828         put_multipath_config(conf);
1829         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1830             pp->retriggers < retrigger_tries) {
1831                 condlog(2, "%s: triggering change event to reinitialize",
1832                         pp->dev);
1833                 pp->initialized = INIT_REQUESTED_UDEV;
1834                 pp->retriggers++;
1835                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1836                                      strlen("change"));
1837                 return 0;
1838         }
1839
1840         /*
1841          * schedule the next check as soon as possible,
1842          * in case we exit abnormally from here
1843          */
1844         pp->tick = checkint;
1845
1846         newstate = path_offline(pp);
1847         /*
1848          * Wait for uevent for removed paths;
1849          * some LLDDs like zfcp keep paths unavailable
1850          * without sending uevents.
1851          */
1852         if (newstate == PATH_REMOVED)
1853                 newstate = PATH_DOWN;
1854
1855         if (newstate == PATH_UP) {
1856                 conf = get_multipath_config();
1857                 pthread_cleanup_push(put_multipath_config, conf);
1858                 newstate = get_state(pp, conf, 1, newstate);
1859                 pthread_cleanup_pop(1);
1860         } else
1861                 checker_clear_message(&pp->checker);
1862
1863         if (pp->wwid_changed) {
1864                 condlog(2, "%s: path wwid has changed. Refusing to use",
1865                         pp->dev);
1866                 newstate = PATH_DOWN;
1867         }
1868
1869         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1870                 condlog(2, "%s: unusable path", pp->dev);
1871                 conf = get_multipath_config();
1872                 pthread_cleanup_push(put_multipath_config, conf);
1873                 pathinfo(pp, conf, 0);
1874                 pthread_cleanup_pop(1);
1875                 return 1;
1876         }
1877         if (!pp->mpp) {
1878                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1879                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1880                         condlog(2, "%s: add missing path", pp->dev);
1881                         conf = get_multipath_config();
1882                         pthread_cleanup_push(put_multipath_config, conf);
1883                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1884                         pthread_cleanup_pop(1);
1885                         if (ret == PATHINFO_OK) {
1886                                 ev_add_path(pp, vecs, 1);
1887                                 pp->tick = 1;
1888                         } else if (ret == PATHINFO_SKIPPED)
1889                                 return -1;
1890                 }
1891                 return 0;
1892         }
1893         /*
1894          * Async IO in flight. Keep the previous path state
1895          * and reschedule as soon as possible
1896          */
1897         if (newstate == PATH_PENDING) {
1898                 pp->tick = 1;
1899                 return 0;
1900         }
1901         /*
1902          * Synchronize with kernel state
1903          */
1904         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1905                 condlog(1, "%s: Could not synchronize with kernel state",
1906                         pp->dev);
1907                 pp->dmstate = PSTATE_UNDEF;
1908         }
1909         /* if update_multipath_strings orphaned the path, quit early */
1910         if (!pp->mpp)
1911                 return 0;
1912
1913         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1914                 pp->state = PATH_SHAKY;
1915                 /*
1916                  * reschedule as soon as possible, so that this path can
1917                  * be recovered in time
1918                  */
1919                 pp->tick = 1;
1920                 return 1;
1921         }
1922
1923         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1924              pp->wait_checks > 0) {
1925                 if (pp->mpp->nr_active > 0) {
1926                         pp->state = PATH_DELAYED;
1927                         pp->wait_checks--;
1928                         return 1;
1929                 } else
1930                         pp->wait_checks = 0;
1931         }
1932
1933         /*
1934          * don't reinstate a failed path if it's in stand-by
1935          * and the target supports only implicit tpgs mode.
1936          * this prevents unnecessary i/o by dm on stand-by
1937          * paths if there are no other active paths in the map.
1938          */
1939         disable_reinstate = (newstate == PATH_GHOST &&
1940                             pp->mpp->nr_active == 0 &&
1941                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1942
1943         pp->chkrstate = newstate;
1944         if (newstate != pp->state) {
1945                 int oldstate = pp->state;
1946                 pp->state = newstate;
1947
1948                 LOG_MSG(1, checker_message(&pp->checker));
1949
1950                 /*
1951                  * upon state change, reset the checkint
1952                  * to the shortest delay
1953                  */
1954                 conf = get_multipath_config();
1955                 pp->checkint = conf->checkint;
1956                 put_multipath_config(conf);
1957
1958                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1959                         /*
1960                          * proactively fail path in the DM
1961                          */
1962                         if (oldstate == PATH_UP ||
1963                             oldstate == PATH_GHOST) {
1964                                 fail_path(pp, 1);
1965                                 if (pp->mpp->delay_wait_checks > 0 &&
1966                                     pp->watch_checks > 0) {
1967                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1968                                         pp->watch_checks = 0;
1969                                 }
1970                         } else
1971                                 fail_path(pp, 0);
1972
1973                         /*
1974                          * cancel scheduled failback
1975                          */
1976                         pp->mpp->failback_tick = 0;
1977
1978                         pp->mpp->stat_path_failures++;
1979                         return 1;
1980                 }
1981
1982                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1983                         if (pp->mpp->prflag) {
1984                                 /*
1985                                  * Check Persistent Reservation.
1986                                  */
1987                                 condlog(2, "%s: checking persistent "
1988                                         "reservation registration", pp->dev);
1989                                 mpath_pr_event_handle(pp);
1990                         }
1991                 }
1992
1993                 /*
1994                  * reinstate this path
1995                  */
1996                 if (oldstate != PATH_UP &&
1997                     oldstate != PATH_GHOST) {
1998                         if (pp->mpp->delay_watch_checks > 0)
1999                                 pp->watch_checks = pp->mpp->delay_watch_checks;
2000                         add_active = 1;
2001                 } else {
2002                         if (pp->watch_checks > 0)
2003                                 pp->watch_checks--;
2004                         add_active = 0;
2005                 }
2006                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
2007                         condlog(3, "%s: reload map", pp->dev);
2008                         ev_add_path(pp, vecs, 1);
2009                         pp->tick = 1;
2010                         return 0;
2011                 }
2012                 new_path_up = 1;
2013
2014                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
2015                         chkr_new_path_up = 1;
2016
2017                 /*
2018                  * if at least one path is up in a group, and
2019                  * the group is disabled, re-enable it
2020                  */
2021                 if (newstate == PATH_UP)
2022                         enable_group(pp);
2023         }
2024         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
2025                 if ((pp->dmstate == PSTATE_FAILED ||
2026                     pp->dmstate == PSTATE_UNDEF) &&
2027                     !disable_reinstate) {
2028                         /* Clear IO errors */
2029                         if (reinstate_path(pp, 0)) {
2030                                 condlog(3, "%s: reload map", pp->dev);
2031                                 ev_add_path(pp, vecs, 1);
2032                                 pp->tick = 1;
2033                                 return 0;
2034                         }
2035                 } else {
2036                         unsigned int max_checkint;
2037                         LOG_MSG(4, checker_message(&pp->checker));
2038                         conf = get_multipath_config();
2039                         max_checkint = conf->max_checkint;
2040                         put_multipath_config(conf);
2041                         if (pp->checkint != max_checkint) {
2042                                 /*
2043                                  * double the next check delay.
2044                                  * max at conf->max_checkint
2045                                  */
2046                                 if (pp->checkint < (max_checkint / 2))
2047                                         pp->checkint = 2 * pp->checkint;
2048                                 else
2049                                         pp->checkint = max_checkint;
2050
2051                                 condlog(4, "%s: delay next check %is",
2052                                         pp->dev_t, pp->checkint);
2053                         }
2054                         if (pp->watch_checks > 0)
2055                                 pp->watch_checks--;
2056                         pp->tick = pp->checkint;
2057                 }
2058         }
2059         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
2060                 if (pp->dmstate == PSTATE_ACTIVE ||
2061                     pp->dmstate == PSTATE_UNDEF)
2062                         fail_path(pp, 0);
2063                 if (newstate == PATH_DOWN) {
2064                         int log_checker_err;
2065
2066                         conf = get_multipath_config();
2067                         log_checker_err = conf->log_checker_err;
2068                         put_multipath_config(conf);
2069                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
2070                                 LOG_MSG(3, checker_message(&pp->checker));
2071                         else
2072                                 LOG_MSG(2, checker_message(&pp->checker));
2073                 }
2074         }
2075
2076         pp->state = newstate;
2077
2078         if (pp->mpp->wait_for_udev)
2079                 return 1;
2080         /*
2081          * path prio refreshing
2082          */
2083         condlog(4, "path prio refresh");
2084
2085         if (update_prio(pp, new_path_up) &&
2086             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
2087              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
2088                 update_path_groups(pp->mpp, vecs, !new_path_up);
2089         else if (need_switch_pathgroup(pp->mpp, 0)) {
2090                 if (pp->mpp->pgfailback > 0 &&
2091                     (new_path_up || pp->mpp->failback_tick <= 0))
2092                         pp->mpp->failback_tick =
2093                                 pp->mpp->pgfailback + 1;
2094                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
2095                          (chkr_new_path_up && followover_should_failback(pp)))
2096                         switch_pathgroup(pp->mpp);
2097         }
2098         return 1;
2099 }
2100
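/*
 * Seed every path's check interval from the current configuration.
 */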
2101 static void init_path_check_interval(struct vectors *vecs)
2102 {
2103         struct config *conf;
2104         struct path *pp;
2105         unsigned int i;
2106
2107         vector_foreach_slot (vecs->pathvec, pp, i) {
2108                 conf = get_multipath_config();
2109                 pp->checkint = conf->checkint;
2110                 put_multipath_config(conf);
2111         }
2112 }
2113
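/*
 * Main path-checker thread: once per tick, check every path, run the
 * periodic timers (deferred failback, retry, uevent wait, ghost delay),
 * garbage-collect dead maps, and sleep until the next tick (respecting
 * strict_timing if configured).
 */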
2114 static void *
2115 checkerloop (void *ap)
2116 {
2117         struct vectors *vecs;
2118         struct path *pp;
2119         int count = 0;
2120         unsigned int i;
2121         struct timespec last_time;
2122         struct config *conf;
2123
2124         pthread_cleanup_push(rcu_unregister, NULL);
2125         rcu_register_thread();
2126         mlockall(MCL_CURRENT | MCL_FUTURE);
2127         vecs = (struct vectors *)ap;
2128         condlog(2, "path checkers start up");
2129
2130         /* Tweak start time for initial path check */
2131         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
2132                 last_time.tv_sec = 0;
2133         else
2134                 last_time.tv_sec -= 1;
2135
2136         while (1) {
2137                 struct timespec diff_time, start_time, end_time;
2138                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
2139
2140                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
2141                         start_time.tv_sec = 0;
2142                 if (start_time.tv_sec && last_time.tv_sec) {
2143                         timespecsub(&start_time, &last_time, &diff_time);
2144                         condlog(4, "tick (%lu.%06lu secs)",
2145                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
2146                         last_time = start_time;
2147                         ticks = diff_time.tv_sec;
2148                 } else {
2149                         ticks = 1;
2150                         condlog(4, "tick (%d ticks)", ticks);
2151                 }
2152 #ifdef USE_SYSTEMD
2153                 if (use_watchdog)
2154                         sd_notify(0, "WATCHDOG=1");
2155 #endif
2156                 rc = set_config_state(DAEMON_RUNNING);
2157                 if (rc == ETIMEDOUT) {
2158                         condlog(4, "timeout waiting for DAEMON_IDLE");
2159                         continue;
2160                 }
2161
2162                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2163                 lock(&vecs->lock);
2164                 pthread_testcancel();
2165                 vector_foreach_slot (vecs->pathvec, pp, i) {
2166                         rc = check_path(vecs, pp, ticks);
2167                         if (rc < 0) {
2168                                 vector_del_slot(vecs->pathvec, i);
2169                                 free_path(pp);
2170                                 i--;
2171                         } else
2172                                 num_paths += rc;
2173                 }
2174                 lock_cleanup_pop(vecs->lock);
2175
2176                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
2177                 lock(&vecs->lock);
2178                 pthread_testcancel();
2179                 defered_failback_tick(vecs->mpvec);
2180                 retry_count_tick(vecs->mpvec);
2181                 missing_uev_wait_tick(vecs);
2182                 ghost_delay_tick(vecs);
2183                 lock_cleanup_pop(vecs->lock);
2184
2185                 if (count)
2186                         count--;
2187                 else {
2188                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2189                         lock(&vecs->lock);
2190                         pthread_testcancel();
2191                         condlog(4, "map garbage collection");
2192                         mpvec_garbage_collector(vecs);
2193                         count = MAPGCINT;
2194                         lock_cleanup_pop(vecs->lock);
2195                 }
2196
2197                 diff_time.tv_nsec = 0;
2198                 if (start_time.tv_sec &&
2199                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
2200                         timespecsub(&end_time, &start_time, &diff_time);
2201                         if (num_paths) {
2202                                 unsigned int max_checkint;
2203
2204                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
2205                                         num_paths, num_paths > 1 ? "s" : "",
2206                                         diff_time.tv_sec,
2207                                         diff_time.tv_nsec / 1000);
2208                                 conf = get_multipath_config();
2209                                 max_checkint = conf->max_checkint;
2210                                 put_multipath_config(conf);
2211                                 if (diff_time.tv_sec > max_checkint)
2212                                         condlog(1, "path checkers took longer "
2213                                                 "than %lu seconds, consider "
2214                                                 "increasing max_polling_interval",
2215                                                 diff_time.tv_sec);
2216                         }
2217                 }
2218                 check_foreign();
2219                 post_config_state(DAEMON_IDLE);
2220                 conf = get_multipath_config();
2221                 strict_timing = conf->strict_timing;
2222                 put_multipath_config(conf);
2223                 if (!strict_timing)
2224                         sleep(1);
2225                 else {
2226                         if (diff_time.tv_nsec) {
2227                                 diff_time.tv_sec = 0;
2228                                 diff_time.tv_nsec =
2229                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
2230                         } else
2231                                 diff_time.tv_sec = 1;
2232
2233                         condlog(3, "waiting for %lu.%06lu secs",
2234                                 diff_time.tv_sec,
2235                                 diff_time.tv_nsec / 1000);
2236                         if (nanosleep(&diff_time, NULL) != 0) {
2237                                 condlog(3, "nanosleep failed with error %d",
2238                                         errno);
2239                                 conf = get_multipath_config();
2240                                 conf->strict_timing = 0;
2241                                 put_multipath_config(conf);
2242                                 break;
2243                         }
2244                 }
2245         }
2246         pthread_cleanup_pop(1);
2247         return NULL;
2248 }
2249
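/*
 * Build up the daemon state: discover paths and maps, coalesce them into
 * multipath devices, push the result into device-mapper and start the
 * event waiter threads. Returns 0 on success; on failure the temporary
 * map vector is freed and 1 is returned.
 */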
2250 int
2251 configure (struct vectors * vecs)
2252 {
2253         struct multipath * mpp;
2254         struct path * pp;
2255         vector mpvec;
2256         int i, ret;
2257         struct config *conf;
2258         static int force_reload = FORCE_RELOAD_WEAK;
2259
2260         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2261                 condlog(0, "couldn't allocate path vec in configure");
2262                 return 1;
2263         }
2264
2265         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2266                 condlog(0, "couldn't allocate multipath vec in configure");
2267                 return 1;
2268         }
2269
2270         if (!(mpvec = vector_alloc())) {
2271                 condlog(0, "couldn't allocate new maps vec in configure");
2272                 return 1;
2273         }
2274
2275         /*
2276          * probe for current path (from sysfs) and map (from dm) sets
2277          */
2278         ret = path_discovery(vecs->pathvec, DI_ALL);
2279         if (ret < 0) {
2280                 condlog(0, "configure failed at path discovery");
2281                 goto fail;
2282         }
2283
2284         vector_foreach_slot (vecs->pathvec, pp, i){
2285                 conf = get_multipath_config();
2286                 pthread_cleanup_push(put_multipath_config, conf);
2287                 if (filter_path(conf, pp) > 0){
2288                         vector_del_slot(vecs->pathvec, i);
2289                         free_path(pp);
2290                         i--;
2291                 }
2292                 else
2293                         pp->checkint = conf->checkint;
2294                 pthread_cleanup_pop(1);
2295         }
2296         if (map_discovery(vecs)) {
2297                 condlog(0, "configure failed at map discovery");
2298                 goto fail;
2299         }
2300
2301         /*
2302          * create new set of maps & push changed ones into dm
2303          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2304          * superfluous ACT_RELOAD ioctls. Later calls are done
2305          * with FORCE_RELOAD_YES.
2306          */
2307         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2308         if (force_reload == FORCE_RELOAD_WEAK)
2309                 force_reload = FORCE_RELOAD_YES;
2310         if (ret) {
2311                 condlog(0, "configure failed while coalescing paths");
2312                 goto fail;
2313         }
2314
2315         /*
2316          * may need to remove some maps which are no longer relevant
2317          * e.g., due to blacklist changes in conf file
2318          */
2319         if (coalesce_maps(vecs, mpvec)) {
2320                 condlog(0, "configure failed while coalescing maps");
2321                 goto fail;
2322         }
2323
2324         dm_lib_release();
2325
2326         sync_maps_state(mpvec);
2327         vector_foreach_slot(mpvec, mpp, i){
2328                 if (remember_wwid(mpp->wwid) == 1)
2329                         trigger_paths_udev_change(mpp, true);
2330                 update_map_pr(mpp);
2331         }
2332
2333         /*
2334          * purge dm of old maps
2335          */
2336         remove_maps(vecs);
2337
2338         /*
2339          * save new set of maps formed by considering current path state
2340          */
2341         vector_free(vecs->mpvec);
2342         vecs->mpvec = mpvec;
2343
2344         /*
2345          * start dm event waiter threads for these new maps
2346          */
2347         vector_foreach_slot(vecs->mpvec, mpp, i) {
2348                 if (wait_for_events(mpp, vecs)) {
2349                         remove_map(mpp, vecs, 1);
2350                         i--;
2351                         continue;
2352                 }
2353                 if (setup_multipath(vecs, mpp))
2354                         i--;
2355         }
2356         return 0;
2357
2358 fail:
2359         vector_free(mpvec);
2360         return 1;
2361 }
2362
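/*
 * A reconfigure must be delayed while any map is still waiting for udev.
 */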
2363 int
2364 need_to_delay_reconfig(struct vectors * vecs)
2365 {
2366         struct multipath *mpp;
2367         int i;
2368
2369         if (!VECTOR_SIZE(vecs->mpvec))
2370                 return 0;
2371
2372         vector_foreach_slot(vecs->mpvec, mpp, i) {
2373                 if (mpp->wait_for_udev)
2374                         return 1;
2375         }
2376         return 0;
2377 }
2378
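/*
 * RCU callback: free an old struct config once all readers are done with it.
 */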
2379 void rcu_free_config(struct rcu_head *head)
2380 {
2381         struct config *conf = container_of(head, struct config, rcu);
2382
2383         free_config(conf);
2384 }
2385
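/*
 * Reload multipath.conf, publish the new configuration via RCU, drop the
 * old map and path state, and rebuild everything with configure().
 */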
2386 int
2387 reconfigure (struct vectors * vecs)
2388 {
2389         struct config * old, *conf;
2390
2391         conf = load_config(DEFAULT_CONFIGFILE);
2392         if (!conf)
2393                 return 1;
2394
2395         /*
2396          * free old map and path vectors ... they use old conf state
2397          */
2398         if (VECTOR_SIZE(vecs->mpvec))
2399                 remove_maps_and_stop_waiters(vecs);
2400
2401         free_pathvec(vecs->pathvec, FREE_PATHS);
2402         vecs->pathvec = NULL;
2403         delete_all_foreign();
2404
2405         /* Re-read any timezone changes */
2406         tzset();
2407
2408         dm_tgt_version(conf->version, TGT_MPATH);
2409         if (verbosity)
2410                 conf->verbosity = verbosity;
2411         if (bindings_read_only)
2412                 conf->bindings_read_only = bindings_read_only;
2413         uxsock_timeout = conf->uxsock_timeout;
2414
2415         old = rcu_dereference(multipath_conf);
2416         rcu_assign_pointer(multipath_conf, conf);
2417         call_rcu(&old->rcu, rcu_free_config);
2418
2419         configure(vecs);
2420
2421
2422         return 0;
2423 }
2424
2425 static struct vectors *
2426 init_vecs (void)
2427 {
2428         struct vectors * vecs;
2429
2430         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2431
2432         if (!vecs)
2433                 return NULL;
2434
2435         pthread_mutex_init(&vecs->lock.mutex, NULL);
2436
2437         return vecs;
2438 }
2439
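/*
 * sigaction() wrapper: install a handler for signo and return the
 * previously installed one (SIG_ERR on failure).
 */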
2440 static void *
2441 signal_set(int signo, void (*func) (int))
2442 {
2443         int r;
2444         struct sigaction sig;
2445         struct sigaction osig;
2446
2447         sig.sa_handler = func;
2448         sigemptyset(&sig.sa_mask);
2449         sig.sa_flags = 0;
2450
2451         r = sigaction(signo, &sig, &osig);
2452
2453         if (r < 0)
2454                 return (SIG_ERR);
2455         else
2456                 return (osig.sa_handler);
2457 }
2458
2459 void
2460 handle_signals(bool nonfatal)
2461 {
2462         if (exit_sig) {
2463                 condlog(2, "exit (signal)");
2464                 exit_sig = 0;
2465                 exit_daemon();
2466         }
2467         if (!nonfatal)
2468                 return;
2469         if (reconfig_sig) {
2470                 condlog(2, "reconfigure (signal)");
2471                 set_config_state(DAEMON_CONFIGURE);
2472         }
2473         if (log_reset_sig) {
2474                 condlog(2, "reset log (signal)");
2475                 if (logsink == 1)
2476                         log_thread_reset();
2477         }
2478         reconfig_sig = 0;
2479         log_reset_sig = 0;
2480 }
2481
2482 static void
2483 sighup (int sig)
2484 {
2485         reconfig_sig = 1;
2486 }
2487
2488 static void
2489 sigend (int sig)
2490 {
2491         exit_sig = 1;
2492 }
2493
2494 static void
2495 sigusr1 (int sig)
2496 {
2497         log_reset_sig = 1;
2498 }
2499
2500 static void
2501 sigusr2 (int sig)
2502 {
2503         condlog(3, "SIGUSR2 received");
2504 }
2505
2506 static void
2507 signal_init(void)
2508 {
2509         sigset_t set;
2510
2511         /* block all signals */
2512         sigfillset(&set);
2513         /* SIGPIPE occurs if logging fails */
2514         sigdelset(&set, SIGPIPE);
2515         pthread_sigmask(SIG_SETMASK, &set, NULL);
2516
2517         /* Other signals will be unblocked in the uxlsnr thread */
2518         signal_set(SIGHUP, sighup);
2519         signal_set(SIGUSR1, sigusr1);
2520         signal_set(SIGUSR2, sigusr2);
2521         signal_set(SIGINT, sigend);
2522         signal_set(SIGTERM, sigend);
2523         signal_set(SIGPIPE, sigend);
2524 }
2525
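/*
 * Request real-time round-robin scheduling (SCHED_RR, priority 99);
 * only warn if that is not possible.
 */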
2526 static void
2527 setscheduler (void)
2528 {
2529         int res;
2530         static struct sched_param sched_param = {
2531                 .sched_priority = 99
2532         };
2533
2534         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2535
2536         if (res == -1)
2537                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2538         return;
2539 }
2540
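/*
 * Lower the daemon's OOM score so the OOM killer leaves it alone, unless
 * systemd already set OOMScoreAdjust. Prefer /proc/self/oom_score_adj and
 * fall back to the legacy /proc/self/oom_adj interface.
 */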
2541 static void
2542 set_oom_adj (void)
2543 {
2544 #ifdef OOM_SCORE_ADJ_MIN
2545         int retry = 1;
2546         char *file = "/proc/self/oom_score_adj";
2547         int score = OOM_SCORE_ADJ_MIN;
2548 #else
2549         int retry = 0;
2550         char *file = "/proc/self/oom_adj";
2551         int score = OOM_ADJUST_MIN;
2552 #endif
2553         FILE *fp;
2554         struct stat st;
2555         char *envp;
2556
2557         envp = getenv("OOMScoreAdjust");
2558         if (envp) {
2559                 condlog(3, "Using systemd provided OOMScoreAdjust");
2560                 return;
2561         }
2562         do {
2563                 if (stat(file, &st) == 0){
2564                         fp = fopen(file, "w");
2565                         if (!fp) {
2566                                 condlog(0, "couldn't fopen %s : %s", file,
2567                                         strerror(errno));
2568                                 return;
2569                         }
2570                         fprintf(fp, "%i", score);
2571                         fclose(fp);
2572                         return;
2573                 }
2574                 if (errno != ENOENT) {
2575                         condlog(0, "couldn't stat %s : %s", file,
2576                                 strerror(errno));
2577                         return;
2578                 }
2579 #ifdef OOM_ADJUST_MIN
2580                 file = "/proc/self/oom_adj";
2581                 score = OOM_ADJUST_MIN;
2582 #else
2583                 retry = 0;
2584 #endif
2585         } while (retry--);
2586         condlog(0, "couldn't adjust oom score");
2587 }
2588
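/*
 * Daemon main body: load the configuration, start the worker threads
 * (uevent listener, uevent dispatcher, cli listener, path checker, and
 * optionally the dmevents waiter), then service the configure/idle state
 * machine until shutdown, and finally tear everything down.
 */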
2589 static int
2590 child (void * param)
2591 {
2592         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr, dmevent_thr;
2593         pthread_attr_t log_attr, misc_attr, uevent_attr;
2594         struct vectors * vecs;
2595         struct multipath * mpp;
2596         int i;
2597 #ifdef USE_SYSTEMD
2598         unsigned long checkint;
2599         int startup_done = 0;
2600 #endif
2601         int rc;
2602         int pid_fd = -1;
2603         struct config *conf;
2604         char *envp;
2605         int queue_without_daemon;
2606
2607         mlockall(MCL_CURRENT | MCL_FUTURE);
2608         signal_init();
2609         rcu_init();
2610
2611         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2612         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2613         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2614         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2615
2616         if (logsink == 1) {
2617                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2618                 log_thread_start(&log_attr);
2619                 pthread_attr_destroy(&log_attr);
2620         }
2621         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2622         if (pid_fd < 0) {
2623                 condlog(1, "failed to create pidfile");
2624                 if (logsink == 1)
2625                         log_thread_stop();
2626                 exit(1);
2627         }
2628
2629         post_config_state(DAEMON_START);
2630
2631         condlog(2, "--------start up--------");
2632         condlog(2, "read " DEFAULT_CONFIGFILE);
2633
2634         conf = load_config(DEFAULT_CONFIGFILE);
2635         if (!conf)
2636                 goto failed;
2637
2638         if (verbosity)
2639                 conf->verbosity = verbosity;
2640         if (bindings_read_only)
2641                 conf->bindings_read_only = bindings_read_only;
2642         uxsock_timeout = conf->uxsock_timeout;
2643         rcu_assign_pointer(multipath_conf, conf);
2644         if (init_checkers(conf->multipath_dir)) {
2645                 condlog(0, "failed to initialize checkers");
2646                 goto failed;
2647         }
2648         if (init_prio(conf->multipath_dir)) {
2649                 condlog(0, "failed to initialize prioritizers");
2650                 goto failed;
2651         }
2652         /* Failing this is non-fatal */
2653
2654         init_foreign(conf->multipath_dir);
2655
2656         if (poll_dmevents)
2657                 poll_dmevents = dmevent_poll_supported();
2658         setlogmask(LOG_UPTO(conf->verbosity + 3));
2659
2660         envp = getenv("LimitNOFILE");
2661
2662         if (envp) {
2663                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2664         } else if (conf->max_fds) {
2665                 struct rlimit fd_limit;
2666
2667                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2668                         condlog(0, "can't get open fds limit: %s",
2669                                 strerror(errno));
2670                         fd_limit.rlim_cur = 0;
2671                         fd_limit.rlim_max = 0;
2672                 }
2673                 if (fd_limit.rlim_cur < conf->max_fds) {
2674                         fd_limit.rlim_cur = conf->max_fds;
2675                         if (fd_limit.rlim_max < conf->max_fds)
2676                                 fd_limit.rlim_max = conf->max_fds;
2677                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2678                                 condlog(0, "can't set open fds limit to "
2679                                         "%lu/%lu : %s",
2680                                         fd_limit.rlim_cur, fd_limit.rlim_max,
2681                                         strerror(errno));
2682                         } else {
2683                                 condlog(3, "set open fds limit to %lu/%lu",
2684                                         fd_limit.rlim_cur, fd_limit.rlim_max);
2685                         }
2686                 }
2687
2688         }
2689
2690         vecs = gvecs = init_vecs();
2691         if (!vecs)
2692                 goto failed;
2693
2694         setscheduler();
2695         set_oom_adj();
2696
2697 #ifdef USE_SYSTEMD
2698         envp = getenv("WATCHDOG_USEC");
2699         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2700                 /* Value is in microseconds */
2701                 conf->max_checkint = checkint / 1000000;
2702                 /* Rescale checkint */
2703                 if (conf->checkint > conf->max_checkint)
2704                         conf->checkint = conf->max_checkint;
2705                 else
2706                         conf->checkint = conf->max_checkint / 4;
2707                 condlog(3, "enabling watchdog, interval %d max %d",
2708                         conf->checkint, conf->max_checkint);
2709                 use_watchdog = conf->checkint;
2710         }
2711 #endif
2712         /*
2713          * Startup done, invalidate configuration
2714          */
2715         conf = NULL;
2716
2717         /*
2718          * Signal start of configuration
2719          */
2720         post_config_state(DAEMON_CONFIGURE);
2721
2722         init_path_check_interval(vecs);
2723
2724         if (poll_dmevents) {
2725                 if (init_dmevent_waiter(vecs)) {
2726                         condlog(0, "failed to allocate dmevents waiter info");
2727                         goto failed;
2728                 }
2729                 if ((rc = pthread_create(&dmevent_thr, &misc_attr,
2730                                          wait_dmevents, NULL))) {
2731                         condlog(0, "failed to create dmevent waiter thread: %d",
2732                                 rc);
2733                         goto failed;
2734                 }
2735         }
2736
2737         /*
2738          * Start uevent listener early to catch events
2739          */
2740         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2741                 condlog(0, "failed to create uevent thread: %d", rc);
2742                 goto failed;
2743         }
2744         pthread_attr_destroy(&uevent_attr);
2745         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2746                 condlog(0, "failed to create cli listener: %d", rc);
2747                 goto failed;
2748         }
2749
2750         /*
2751          * start threads
2752          */
2753         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2754                 condlog(0,"failed to create checker loop thread: %d", rc);
2755                 goto failed;
2756         }
2757         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2758                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2759                 goto failed;
2760         }
2761         pthread_attr_destroy(&misc_attr);
2762
2763         while (running_state != DAEMON_SHUTDOWN) {
2764                 pthread_cleanup_push(config_cleanup, NULL);
2765                 pthread_mutex_lock(&config_lock);
2766                 if (running_state != DAEMON_CONFIGURE &&
2767                     running_state != DAEMON_SHUTDOWN) {
2768                         pthread_cond_wait(&config_cond, &config_lock);
2769                 }
2770                 pthread_cleanup_pop(1);
2771                 if (running_state == DAEMON_CONFIGURE) {
2772                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2773                         lock(&vecs->lock);
2774                         pthread_testcancel();
2775                         if (!need_to_delay_reconfig(vecs)) {
2776                                 reconfigure(vecs);
2777                         } else {
2778                                 conf = get_multipath_config();
2779                                 conf->delayed_reconfig = 1;
2780                                 put_multipath_config(conf);
2781                         }
2782                         lock_cleanup_pop(vecs->lock);
2783                         post_config_state(DAEMON_IDLE);
2784 #ifdef USE_SYSTEMD
2785                         if (!startup_done) {
2786                                 sd_notify(0, "READY=1");
2787                                 startup_done = 1;
2788                         }
2789 #endif
2790                 }
2791         }
2792
2793         lock(&vecs->lock);
2794         conf = get_multipath_config();
2795         queue_without_daemon = conf->queue_without_daemon;
2796         put_multipath_config(conf);
2797         if (queue_without_daemon == QUE_NO_DAEMON_OFF)
2798                 vector_foreach_slot(vecs->mpvec, mpp, i)
2799                         dm_queue_if_no_path(mpp->alias, 0);
2800         remove_maps_and_stop_waiters(vecs);
2801         unlock(&vecs->lock);
2802
2803         pthread_cancel(check_thr);
2804         pthread_cancel(uevent_thr);
2805         pthread_cancel(uxlsnr_thr);
2806         pthread_cancel(uevq_thr);
2807         if (poll_dmevents)
2808                 pthread_cancel(dmevent_thr);
2809
2810         pthread_join(check_thr, NULL);
2811         pthread_join(uevent_thr, NULL);
2812         pthread_join(uxlsnr_thr, NULL);
2813         pthread_join(uevq_thr, NULL);
2814         if (poll_dmevents)
2815                 pthread_join(dmevent_thr, NULL);
2816
2817         stop_io_err_stat_thread();
2818
2819         lock(&vecs->lock);
2820         free_pathvec(vecs->pathvec, FREE_PATHS);
2821         vecs->pathvec = NULL;
2822         unlock(&vecs->lock);
2823
2824         pthread_mutex_destroy(&vecs->lock.mutex);
2825         FREE(vecs);
2826         vecs = NULL;
2827
2828         cleanup_foreign();
2829         cleanup_checkers();
2830         cleanup_prio();
2831         if (poll_dmevents)
2832                 cleanup_dmevent_waiter();
2833
2834         dm_lib_release();
2835         dm_lib_exit();
2836
2837         /* We're done here */
2838         condlog(3, "unlink pidfile");
2839         unlink(DEFAULT_PIDFILE);
2840
2841         condlog(2, "--------shut down-------");
2842
2843         if (logsink == 1)
2844                 log_thread_stop();
2845
2846         /*
2847          * Freeing config must be done after condlog() and dm_lib_exit(),
2848          * because logging functions like dlog() and dm_write_log()
2849          * reference the config.
2850          */
2851         conf = rcu_dereference(multipath_conf);
2852         rcu_assign_pointer(multipath_conf, NULL);
2853         call_rcu(&conf->rcu, rcu_free_config);
2854         udev_unref(udev);
2855         udev = NULL;
2856         pthread_attr_destroy(&waiter_attr);
2857         pthread_attr_destroy(&io_err_stat_attr);
2858 #ifdef _DEBUG_
2859         dbg_free_final(NULL);
2860 #endif
2861
2862 #ifdef USE_SYSTEMD
2863         sd_notify(0, "ERRNO=0");
2864 #endif
2865         exit(0);
2866
2867 failed:
2868 #ifdef USE_SYSTEMD
2869         sd_notify(0, "ERRNO=1");
2870 #endif
2871         if (pid_fd >= 0)
2872                 close(pid_fd);
2873         exit(1);
2874 }
2875
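/*
 * Classic double fork: detach from the controlling terminal and redirect
 * stdin/stdout/stderr to /dev/null. Returns 0 in the daemon process,
 * a positive pid in the original parent, and -1 on error.
 */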
2876 static int
2877 daemonize(void)
2878 {
2879         int pid;
2880         int dev_null_fd;
2881
2882         if( (pid = fork()) < 0){
2883                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2884                 return -1;
2885         }
2886         else if (pid != 0)
2887                 return pid;
2888
2889         setsid();
2890
2891         if ( (pid = fork()) < 0)
2892                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2893         else if (pid != 0)
2894                 _exit(0);
2895
2896         if (chdir("/") < 0)
2897                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2898
2899         dev_null_fd = open("/dev/null", O_RDWR);
2900         if (dev_null_fd < 0){
2901                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2902                         strerror(errno));
2903                 _exit(0);
2904         }
2905
2906         close(STDIN_FILENO);
2907         if (dup(dev_null_fd) < 0) {
2908                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2909                         strerror(errno));
2910                 _exit(0);
2911         }
2912         close(STDOUT_FILENO);
2913         if (dup(dev_null_fd) < 0) {
2914                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2915                         strerror(errno));
2916                 _exit(0);
2917         }
2918         close(STDERR_FILENO);
2919         if (dup(dev_null_fd) < 0) {
2920                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2921                         strerror(errno));
2922                 _exit(0);
2923         }
2924         close(dev_null_fd);
2925         daemon_pid = getpid();
2926         return 0;
2927 }
2928
2929 int
2930 main (int argc, char *argv[])
2931 {
2932         extern char *optarg;
2933         extern int optind;
2934         int arg;
2935         int err;
2936         int foreground = 0;
2937         struct config *conf;
2938
2939         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2940                                    "Manipulated through RCU");
2941         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2942                 "Suppress complaints about unprotected running_state reads");
2943         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2944                 "Suppress complaints about this scalar variable");
2945
2946         logsink = 1;
2947
2948         if (getuid() != 0) {
2949                 fprintf(stderr, "need to be root\n");
2950                 exit(1);
2951         }
2952
2953         /* make sure we don't lock any path */
2954         if (chdir("/") < 0)
2955                 fprintf(stderr, "can't chdir to root directory : %s\n",
2956                         strerror(errno));
2957         umask(umask(077) | 022);
2958
2959         pthread_cond_init_mono(&config_cond);
2960
2961         udev = udev_new();
2962         libmp_udev_set_sync_support(0);
2963
2964         while ((arg = getopt(argc, argv, ":dsv:k::Bniw")) != EOF ) {
2965                 switch(arg) {
2966                 case 'd':
2967                         foreground = 1;
2968                         if (logsink > 0)
2969                                 logsink = 0;
2970                         //debug=1; /* ### comment me out ### */
2971                         break;
2972                 case 'v':
2973                         if (sizeof(optarg) > sizeof(char *) ||
2974                             !isdigit(optarg[0]))
2975                                 exit(1);
2976
2977                         verbosity = atoi(optarg);
2978                         break;
2979                 case 's':
2980                         logsink = -1;
2981                         break;
2982                 case 'k':
2983                         conf = load_config(DEFAULT_CONFIGFILE);
2984                         if (!conf)
2985                                 exit(1);
2986                         if (verbosity)
2987                                 conf->verbosity = verbosity;
2988                         uxsock_timeout = conf->uxsock_timeout;
2989                         uxclnt(optarg, uxsock_timeout + 100);
2990                         free_config(conf);
2991                         exit(0);
2992                 case 'B':
2993                         bindings_read_only = 1;
2994                         break;
2995                 case 'n':
2996                         condlog(0, "WARNING: ignoring deprecated option -n, use 'ignore_wwids = no' instead");
2997                         break;
2998                 case 'w':
2999                         poll_dmevents = 0;
3000                         break;
3001                 default:
3002                         fprintf(stderr, "Invalid argument '-%c'\n",
3003                                 optopt);
3004                         exit(1);
3005                 }
3006         }
3007         if (optind < argc) {
3008                 char cmd[CMDSIZE];
3009                 char * s = cmd;
3010                 char * c = s;
3011
3012                 conf = load_config(DEFAULT_CONFIGFILE);
3013                 if (!conf)
3014                         exit(1);
3015                 if (verbosity)
3016                         conf->verbosity = verbosity;
3017                 uxsock_timeout = conf->uxsock_timeout;
3018                 memset(cmd, 0x0, CMDSIZE);
3019                 while (optind < argc) {
3020                         if (strchr(argv[optind], ' '))
3021                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
3022                         else
3023                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
3024                         optind++;
3025                 }
3026                 c += snprintf(c, s + CMDSIZE - c, "\n");
3027                 uxclnt(s, uxsock_timeout + 100);
3028                 free_config(conf);
3029                 exit(0);
3030         }
3031
3032         if (foreground) {
3033                 if (!isatty(fileno(stdout)))
3034                         setbuf(stdout, NULL);
3035                 err = 0;
3036                 daemon_pid = getpid();
3037         } else
3038                 err = daemonize();
3039
3040         if (err < 0)
3041                 /* error */
3042                 exit(1);
3043         else if (err > 0)
3044                 /* parent dies */
3045                 exit(0);
3046         else
3047                 /* child lives */
3048                 return (child(NULL));
3049 }
3050
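/*
 * Thread handler for persistent reservations on a path: read the keys
 * registered on the device (PR IN, READ KEYS) and, if the reservation key
 * configured for this map is among them, register it on this path as well
 * using REGISTER AND IGNORE EXISTING KEY.
 */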
3051 void *mpath_pr_event_handler_fn(void *pathp)
3052 {
3053         struct multipath * mpp;
3054         int i, ret, isFound;
3055         struct path * pp = (struct path *)pathp;
3056         struct prout_param_descriptor *param;
3057         struct prin_resp *resp;
3058
3059         rcu_register_thread();
3060         mpp = pp->mpp;
3061
3062         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
3063         if (!resp) {
3064                 condlog(0, "%s: Alloc failed for prin response", pp->dev);
3065                 goto out;
3066         }
3067
3068         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
3069         if (ret != MPATH_PR_SUCCESS) {
3070                 condlog(0, "%s: pr in read keys service action failed. Error=%d",
3071                         pp->dev, ret);
3072                 goto out;
3073         }
3074
3075         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
3076                 resp->prin_descriptor.prin_readkeys.additional_length);
3077
3078         if (resp->prin_descriptor.prin_readkeys.additional_length == 0) {
3079                 condlog(1, "%s: No key found. Device may not be registered.",
3080                         pp->dev);
3081                 ret = MPATH_PR_SUCCESS;
3082                 goto out;
3083         }
3084         condlog(2, "Multipath reservation_key: 0x%" PRIx64,
3085                 get_be64(mpp->reservation_key));
3086
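             /* each entry in the READ KEYS payload is an 8-byte key; look
              * for the key configured for this map */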
3087         isFound = 0;
3088         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++) {
3089                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
3090                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8, -1);
3091                 if (!memcmp(&mpp->reservation_key,
3092                             &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8)) {
3093                         condlog(2, "%s: pr key found in prin readkeys response",
3094                                 mpp->alias);
3095                         isFound = 1;
3096                         break;
3097                 }
3098         }
3099         if (!isFound) {
3100                 condlog(0, "%s: either device is not registered or "
3101                         "host is not authorised for registration, skipping path",
3102                         pp->dev);
3103                 ret = MPATH_PR_OTHER;
3104                 goto out;
3105         }
3106
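             /* the map's key is registered on the device, so register it on
              * this path too via REGISTER AND IGNORE EXISTING KEY */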
3107         param = calloc(1, sizeof(struct prout_param_descriptor));
3108         if (!param)
                     goto out;
3109         param->sa_flags = mpp->sa_flags;
3110         memcpy(param->sa_key, &mpp->reservation_key, 8);
3111         param->num_transportid = 0;
3112
3113         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
3114
3115         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
3116         if (ret != MPATH_PR_SUCCESS)
3117                 condlog(0, "%s: Reservation registration failed. Error: %d",
3118                         pp->dev, ret);
3119
3120         mpp->prflag = 1;
3121
3122         free(param);
3123 out:
3124         if (resp)
3125                 free(resp);
3126         rcu_unregister_thread();
3127         return NULL;
3128 }
3129
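/*
 * For SCSI paths belonging to a map with a configured reservation_key,
 * run the PR registration handler in a separate thread and wait for it
 * to finish.
 */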
3130 int mpath_pr_event_handle(struct path *pp)
3131 {
3132         pthread_t thread;
3133         int rc;
3134         pthread_attr_t attr;
3135         struct multipath * mpp;
3136
3137         if (pp->bus != SYSFS_BUS_SCSI)
3138                 return 0;
3139
3140         mpp = pp->mpp;
3141
3142         if (!get_be64(mpp->reservation_key))
3143                 return -1;
3144
3145         pthread_attr_init(&attr);
3146         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
3147
3148         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
3149         pthread_attr_destroy(&attr);
3150         if (rc) {
3151                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
3152                 return -1;
3153         }
3154         pthread_join(thread, NULL);
3155         return 0;
3156 }