multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #ifdef USE_SYSTEMD
21 #include <systemd/sd-daemon.h>
22 #endif
23 #include <semaphore.h>
24 #include <mpath_cmd.h>
25 #include <mpath_persist.h>
26 #include <time.h>
27
28 /*
29  * libcheckers
30  */
31 #include <checkers.h>
32
33 #ifdef USE_SYSTEMD
34 static int use_watchdog;
35 #endif
36
37 int uxsock_timeout;
38
39 /*
40  * libmultipath
41  */
42 #include <parser.h>
43 #include <vector.h>
44 #include <memory.h>
45 #include <config.h>
46 #include <util.h>
47 #include <hwtable.h>
48 #include <defaults.h>
49 #include <structs.h>
50 #include <blacklist.h>
51 #include <structs_vec.h>
52 #include <dmparser.h>
53 #include <devmapper.h>
54 #include <sysfs.h>
55 #include <dict.h>
56 #include <discovery.h>
57 #include <debug.h>
58 #include <propsel.h>
59 #include <uevent.h>
60 #include <switchgroup.h>
61 #include <print.h>
62 #include <configure.h>
63 #include <prio.h>
64 #include <wwids.h>
65 #include <pgpolicies.h>
66 #include <uevent.h>
67 #include <log.h>
68 #include "prioritizers/alua_rtpg.h"
69
70 #include "main.h"
71 #include "pidfile.h"
72 #include "uxlsnr.h"
73 #include "uxclnt.h"
74 #include "cli.h"
75 #include "cli_handlers.h"
76 #include "lock.h"
77 #include "waiter.h"
78 #include "wwids.h"
79
80 #define FILE_NAME_SIZE 256
81 #define CMDSIZE 160
82
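/*
 * LOG_MSG() logs a checker message for the path "pp" that must be in scope
 * at the call site; offline paths get a dedicated message and empty
 * messages are skipped.  Typical use: LOG_MSG(3, checker_message(&pp->checker)).
 */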
83 #define LOG_MSG(a, b) \
84 do { \
85         if (pp->offline) \
86                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
87         else if (strlen(b)) \
88                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
89 } while(0)
90
91 struct mpath_event_param
92 {
93         char * devname;
94         struct multipath *mpp;
95 };
96
97 unsigned int mpath_mx_alloc_len;
98
99 int logsink;
100 enum daemon_status running_state = DAEMON_INIT;
101 pid_t daemon_pid;
102 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
103 pthread_cond_t config_cond = PTHREAD_COND_INITIALIZER;
104
105 /*
106  * global copy of vecs for use in sig handlers
107  */
108 struct vectors * gvecs;
109
110 struct udev * udev;
111
112 const char *
113 daemon_status(void)
114 {
115         switch (running_state) {
116         case DAEMON_INIT:
117                 return "init";
118         case DAEMON_START:
119                 return "startup";
120         case DAEMON_CONFIGURE:
121                 return "configure";
122         case DAEMON_IDLE:
123                 return "idle";
124         case DAEMON_RUNNING:
125                 return "running";
126         case DAEMON_SHUTDOWN:
127                 return "shutdown";
128         }
129         return NULL;
130 }
131
132 /*
133  * I love you too, systemd ...
134  */
135 const char *
136 sd_notify_status(void)
137 {
138         switch (running_state) {
139         case DAEMON_INIT:
140                 return "STATUS=init";
141         case DAEMON_START:
142                 return "STATUS=startup";
143         case DAEMON_CONFIGURE:
144                 return "STATUS=configure";
145         case DAEMON_IDLE:
146                 return "STATUS=idle";
147         case DAEMON_RUNNING:
148                 return "STATUS=running";
149         case DAEMON_SHUTDOWN:
150                 return "STATUS=shutdown";
151         }
152         return NULL;
153 }
154
155 static void config_cleanup(void *arg)
156 {
157         pthread_mutex_unlock(&config_lock);
158 }
159
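/*
 * Switch the daemon to the given state, wake up threads waiting on
 * config_cond and, when built with systemd support, report the new
 * state to systemd.
 */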
160 void post_config_state(enum daemon_status state)
161 {
162         pthread_mutex_lock(&config_lock);
163         if (state != running_state) {
164                 running_state = state;
165                 pthread_cond_broadcast(&config_cond);
166 #ifdef USE_SYSTEMD
167                 sd_notify(0, sd_notify_status());
168 #endif
169         }
170         pthread_mutex_unlock(&config_lock);
171 }
172
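/*
 * Like post_config_state(), but meant for requesting a state change:
 * if the daemon is busy (not DAEMON_IDLE), wait up to one second for it
 * to settle before switching.  Returns 0 on success or the
 * pthread_cond_timedwait() error code (e.g. ETIMEDOUT) otherwise.
 */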
173 int set_config_state(enum daemon_status state)
174 {
175         int rc = 0;
176
177         pthread_cleanup_push(config_cleanup, NULL);
178         pthread_mutex_lock(&config_lock);
179         if (running_state != state) {
180                 if (running_state != DAEMON_IDLE) {
181                         struct timespec ts;
182
183                         clock_gettime(CLOCK_REALTIME, &ts);
184                         ts.tv_sec += 1;
185                         rc = pthread_cond_timedwait(&config_cond,
186                                                     &config_lock, &ts);
187                 }
188                 if (!rc) {
189                         running_state = state;
190                         pthread_cond_broadcast(&config_cond);
191 #ifdef USE_SYSTEMD
192                         sd_notify(0, sd_notify_status());
193 #endif
194                 }
195         }
196         pthread_cleanup_pop(1);
197         return rc;
198 }
199
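/*
 * Decide whether the map should switch to another path group:
 * optionally refresh the path priorities, recompute the best path group
 * and return 1 if it differs from the group the kernel will use next.
 */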
200 static int
201 need_switch_pathgroup (struct multipath * mpp, int refresh)
202 {
203         struct pathgroup * pgp;
204         struct path * pp;
205         unsigned int i, j;
206
207         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
208                 return 0;
209
210         /*
211          * Refresh path priority values
212          */
213         if (refresh)
214                 vector_foreach_slot (mpp->pg, pgp, i)
215                         vector_foreach_slot (pgp->paths, pp, j)
216                                 pathinfo(pp, conf->hwtable, DI_PRIO);
217
218         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
219                 return 0;
220
221         mpp->bestpg = select_path_group(mpp);
222
223         if (mpp->bestpg != mpp->nextpg)
224                 return 1;
225
226         return 0;
227 }
228
229 static void
230 switch_pathgroup (struct multipath * mpp)
231 {
232         mpp->stat_switchgroup++;
233         dm_switchgroup(mpp->alias, mpp->bestpg);
234         condlog(2, "%s: switch to path group #%i",
235                  mpp->alias, mpp->bestpg);
236 }
237
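/*
 * Flush any existing map that is not present in the newly coalesced map
 * vector (nmpv), i.e. no longer allowed by the current configuration.
 * Maps that cannot be flushed (typically because they are still open)
 * are kept and moved over to nmpv.  Returns 1 on allocation failure.
 */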
238 static int
239 coalesce_maps(struct vectors *vecs, vector nmpv)
240 {
241         struct multipath * ompp;
242         vector ompv = vecs->mpvec;
243         unsigned int i;
244
245         vector_foreach_slot (ompv, ompp, i) {
246                 condlog(3, "%s: coalesce map", ompp->alias);
247                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
248                         /*
249                          * remove all current maps not allowed by the
250                          * current configuration
251                          */
252                         if (dm_flush_map(ompp->alias)) {
253                                 condlog(0, "%s: unable to flush devmap",
254                                         ompp->alias);
255                                 /*
256                                  * may be just because the device is open
257                                  */
258                                 if (setup_multipath(vecs, ompp) != 0) {
259                                         i--;
260                                         continue;
261                                 }
262                                 if (!vector_alloc_slot(nmpv))
263                                         return 1;
264
265                                 vector_set_slot(nmpv, ompp);
266
267                                 vector_del_slot(ompv, i);
268                                 i--;
269                         }
270                         else {
271                                 dm_lib_release();
272                                 condlog(2, "%s devmap removed", ompp->alias);
273                         }
274                 } else if (conf->reassign_maps) {
275                         condlog(3, "%s: Reassign existing device-mapper"
276                                 " devices", ompp->alias);
277                         dm_reassign(ompp->alias);
278                 }
279         }
280         return 0;
281 }
282
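/*
 * Bring the kernel's view of each path in line with the checker state:
 * reinstate paths that the checker sees as usable but device-mapper has
 * failed, and fail paths that device-mapper still uses although the
 * checker sees them as down.
 */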
283 void
284 sync_map_state(struct multipath *mpp)
285 {
286         struct pathgroup *pgp;
287         struct path *pp;
288         unsigned int i, j;
289
290         if (!mpp->pg)
291                 return;
292
293         vector_foreach_slot (mpp->pg, pgp, i){
294                 vector_foreach_slot (pgp->paths, pp, j){
295                         if (pp->state == PATH_UNCHECKED ||
296                             pp->state == PATH_WILD ||
297                             pp->state == PATH_DELAYED)
298                                 continue;
299                         if ((pp->dmstate == PSTATE_FAILED ||
300                              pp->dmstate == PSTATE_UNDEF) &&
301                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
302                                 dm_reinstate_path(mpp->alias, pp->dev_t);
303                         else if ((pp->dmstate == PSTATE_ACTIVE ||
304                                   pp->dmstate == PSTATE_UNDEF) &&
305                                  (pp->state == PATH_DOWN ||
306                                   pp->state == PATH_SHAKY))
307                                 dm_fail_path(mpp->alias, pp->dev_t);
308                 }
309         }
310 }
311
312 static void
313 sync_maps_state(vector mpvec)
314 {
315         unsigned int i;
316         struct multipath *mpp;
317
318         vector_foreach_slot (mpvec, mpp, i)
319                 sync_map_state(mpp);
320 }
321
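/*
 * Remove a multipath map: flush its device-mapper table (optionally even
 * if paths remain, possibly as a deferred remove), then orphan its paths
 * and stop its event waiter thread.  Returns non-zero if the table could
 * not be flushed.
 */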
322 static int
323 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
324 {
325         int r;
326
327         if (nopaths)
328                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
329         else
330                 r = dm_flush_map(mpp->alias);
331         /*
332          * clear references to this map now that it is flushed, so we can ignore
333          * the spurious uevent the dm_flush_map call above may have generated
334          */
335         if (r) {
336                 /*
337                  * May not really be an error -- if the map was already flushed
338                  * from the device mapper by dmsetup(8) for instance.
339                  */
340                 if (r == 1)
341                         condlog(0, "%s: can't flush", mpp->alias);
342                 else {
343                         condlog(2, "%s: devmap deferred remove", mpp->alias);
344                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
345                 }
346                 return r;
347         }
348         else {
349                 dm_lib_release();
350                 condlog(2, "%s: map flushed", mpp->alias);
351         }
352
353         orphan_paths(vecs->pathvec, mpp);
354         remove_map_and_stop_waiter(mpp, vecs, 1);
355
356         return 0;
357 }
358
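/*
 * Re-adopt the paths of an existing map and reload its device-mapper
 * table, retrying the reload a few times before giving up, then
 * resynchronize the in-memory state with the kernel.
 */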
359 int
360 update_map (struct multipath *mpp, struct vectors *vecs)
361 {
362         int retries = 3;
363         char params[PARAMS_SIZE] = {0};
364
365 retry:
366         condlog(4, "%s: updating new map", mpp->alias);
367         if (adopt_paths(vecs->pathvec, mpp)) {
368                 condlog(0, "%s: failed to adopt paths for new map update",
369                         mpp->alias);
370                 retries = -1;
371                 goto fail;
372         }
373         verify_paths(mpp, vecs);
374         mpp->flush_on_last_del = FLUSH_UNDEF;
375         mpp->action = ACT_RELOAD;
376
377         if (setup_map(mpp, params, PARAMS_SIZE)) {
378                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
379                 retries = -1;
380                 goto fail;
381         }
382         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
383                 condlog(0, "%s: update_map sleep", mpp->alias);
384                 sleep(1);
385                 goto retry;
386         }
387         dm_lib_release();
388
389 fail:
390         if (setup_multipath(vecs, mpp))
391                 return 1;
392
393         sync_map_state(mpp);
394
395         if (retries < 0)
396                 condlog(0, "%s: failed reload in new map update", mpp->alias);
397         return 0;
398 }
399
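/*
 * uevent handler for device-mapper "change" events: resolve the map name
 * from DM_NAME (or from the major:minor numbers) and hand it over to
 * ev_add_map() under the vectors lock.
 */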
400 static int
401 uev_add_map (struct uevent * uev, struct vectors * vecs)
402 {
403         char *alias;
404         int major = -1, minor = -1, rc;
405
406         condlog(3, "%s: add map (uevent)", uev->kernel);
407         alias = uevent_get_dm_name(uev);
408         if (!alias) {
409                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
410                 major = uevent_get_major(uev);
411                 minor = uevent_get_minor(uev);
412                 alias = dm_mapname(major, minor);
413                 if (!alias) {
414                         condlog(2, "%s: mapname not found for %d:%d",
415                                 uev->kernel, major, minor);
416                         return 1;
417                 }
418         }
419         pthread_cleanup_push(cleanup_lock, &vecs->lock);
420         lock(vecs->lock);
421         pthread_testcancel();
422         rc = ev_add_map(uev->kernel, alias, vecs);
423         lock_cleanup_pop(vecs->lock);
424         FREE(alias);
425         return rc;
426 }
427
428 int
429 ev_add_map (char * dev, char * alias, struct vectors * vecs)
430 {
431         char * refwwid;
432         struct multipath * mpp;
433         int map_present;
434         int r = 1;
435
436         map_present = dm_map_present(alias);
437
438         if (map_present && !dm_is_mpath(alias)) {
439                 condlog(4, "%s: not a multipath map", alias);
440                 return 0;
441         }
442
443         mpp = find_mp_by_alias(vecs->mpvec, alias);
444
445         if (mpp) {
446                 if (mpp->wait_for_udev > 1) {
447                         if (update_map(mpp, vecs))
448                                 /* setup_multipath removed the map */
449                                 return 1;
450                 }
451                 if (mpp->wait_for_udev) {
452                         mpp->wait_for_udev = 0;
453                         if (conf->delayed_reconfig &&
454                             !need_to_delay_reconfig(vecs)) {
455                                 condlog(2, "reconfigure (delayed)");
456                                 set_config_state(DAEMON_CONFIGURE);
457                                 return 0;
458                         }
459                 }
460                 /*
461                  * Not really an error -- we generate our own uevent
462                  * if we create a multipath mapped device as a result
463                  * of uev_add_path
464                  */
465                 if (conf->reassign_maps) {
466                         condlog(3, "%s: Reassign existing device-mapper devices",
467                                 alias);
468                         dm_reassign(alias);
469                 }
470                 return 0;
471         }
472         condlog(2, "%s: adding map", alias);
473
474         /*
475          * now we can register the map
476          */
477         if (map_present) {
478                 if ((mpp = add_map_without_path(vecs, alias))) {
479                         sync_map_state(mpp);
480                         condlog(2, "%s: devmap %s registered", alias, dev);
481                         return 0;
482                 } else {
483                         condlog(2, "%s: uev_add_map failed", dev);
484                         return 1;
485                 }
486         }
487         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
488
489         if (refwwid) {
490                 r = coalesce_paths(vecs, NULL, refwwid, 0, 1);
491                 dm_lib_release();
492         }
493
494         if (!r)
495                 condlog(2, "%s: devmap %s added", alias, dev);
496         else if (r == 2)
497                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
498         else
499                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
500
501         FREE(refwwid);
502         return r;
503 }
504
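/*
 * uevent handler for device-mapper "remove" events: if the event matches
 * a map we track (same minor and alias), drop the in-memory map and
 * orphan its paths; the kernel map itself is already gone.
 */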
505 static int
506 uev_remove_map (struct uevent * uev, struct vectors * vecs)
507 {
508         char *alias;
509         int minor;
510         struct multipath *mpp;
511
512         condlog(2, "%s: remove map (uevent)", uev->kernel);
513         alias = uevent_get_dm_name(uev);
514         if (!alias) {
515                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
516                 return 0;
517         }
518         minor = uevent_get_minor(uev);
519
520         pthread_cleanup_push(cleanup_lock, &vecs->lock);
521         lock(vecs->lock);
522         pthread_testcancel();
523         mpp = find_mp_by_minor(vecs->mpvec, minor);
524
525         if (!mpp) {
526                 condlog(2, "%s: devmap not registered, can't remove",
527                         uev->kernel);
528                 goto out;
529         }
530         if (strcmp(mpp->alias, alias)) {
531                 condlog(2, "%s: map alias mismatch (have \"%s\", got \"%s\")",
532                         uev->kernel, mpp->alias, alias);
533                 goto out;
534         }
535
536         orphan_paths(vecs->pathvec, mpp);
537         remove_map_and_stop_waiter(mpp, vecs, 1);
538 out:
539         lock_cleanup_pop(vecs->lock);
540         FREE(alias);
541         return 0;
542 }
543
544 /* Called from CLI handler */
545 int
546 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
547 {
548         struct multipath * mpp;
549
550         mpp = find_mp_by_minor(vecs->mpvec, minor);
551
552         if (!mpp) {
553                 condlog(2, "%s: devmap not registered, can't remove",
554                         devname);
555                 return 1;
556         }
557         if (strcmp(mpp->alias, alias)) {
558                 condlog(2, "%s: map alias mismatch (have \"%s\", got \"%s\")",
559                         devname, mpp->alias, alias);
560                 return 1;
561         }
562         return flush_map(mpp, vecs, 0);
563 }
564
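/*
 * uevent handler for block device "add" events: reject suspicious device
 * names, reinitialize a path that is already in pathvec but has no wwid
 * yet, or gather the path information and register the new path via
 * ev_add_path().
 */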
565 static int
566 uev_add_path (struct uevent *uev, struct vectors * vecs)
567 {
568         struct path *pp;
569         int ret = 0, i;
570
571         condlog(2, "%s: add path (uevent)", uev->kernel);
572         if (strstr(uev->kernel, "..") != NULL) {
573                 /*
574                  * Don't allow relative device names in the pathvec
575                  */
576                 condlog(0, "%s: path name is invalid", uev->kernel);
577                 return 1;
578         }
579
580         pthread_cleanup_push(cleanup_lock, &vecs->lock);
581         lock(vecs->lock);
582         pthread_testcancel();
583         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
584         if (pp) {
585                 int r;
586
587                 condlog(0, "%s: spurious uevent, path already in pathvec",
588                         uev->kernel);
589                 if (!pp->mpp && !strlen(pp->wwid)) {
590                         condlog(3, "%s: reinitialize path", uev->kernel);
591                         udev_device_unref(pp->udev);
592                         pp->udev = udev_device_ref(uev->udev);
593                         r = pathinfo(pp, conf->hwtable,
594                                      DI_ALL | DI_BLACKLIST);
595                         if (r == PATHINFO_OK)
596                                 ret = ev_add_path(pp, vecs);
597                         else if (r == PATHINFO_SKIPPED) {
598                                 condlog(3, "%s: remove blacklisted path",
599                                         uev->kernel);
600                                 i = find_slot(vecs->pathvec, (void *)pp);
601                                 if (i != -1)
602                                         vector_del_slot(vecs->pathvec, i);
603                                 free_path(pp);
604                         } else {
605                                 condlog(0, "%s: failed to reinitialize path",
606                                         uev->kernel);
607                                 ret = 1;
608                         }
609                 }
610         }
611         lock_cleanup_pop(vecs->lock);
612         if (pp)
613                 return ret;
614
615         /*
616          * get path vital state
617          */
618         ret = alloc_path_with_pathinfo(conf->hwtable, uev->udev,
619                                        DI_ALL, &pp);
620         if (!pp) {
621                 if (ret == PATHINFO_SKIPPED)
622                         return 0;
623                 condlog(3, "%s: failed to get path info", uev->kernel);
624                 return 1;
625         }
626         pthread_cleanup_push(cleanup_lock, &vecs->lock);
627         lock(vecs->lock);
628         pthread_testcancel();
629         ret = store_path(vecs->pathvec, pp);
630         if (!ret) {
631                 pp->checkint = conf->checkint;
632                 ret = ev_add_path(pp, vecs);
633         } else {
634                 condlog(0, "%s: failed to store path info, "
635                         "dropping event",
636                         uev->kernel);
637                 free_path(pp);
638                 ret = 1;
639         }
640         lock_cleanup_pop(vecs->lock);
641         return ret;
642 }
643
644 /*
645  * returns:
646  * 0: added
647  * 1: error
648  */
649 int
650 ev_add_path (struct path * pp, struct vectors * vecs)
651 {
652         struct multipath * mpp;
653         char params[PARAMS_SIZE] = {0};
654         int retries = 3;
655         int start_waiter = 0;
656         int ret;
657
658         /*
659          * need path UID to go any further
660          */
661         if (strlen(pp->wwid) == 0) {
662                 condlog(0, "%s: failed to get path uid", pp->dev);
663                 goto fail; /* leave path added to pathvec */
664         }
665         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
666         if (mpp && mpp->wait_for_udev) {
667                 mpp->wait_for_udev = 2;
668                 orphan_path(pp, "waiting for create to complete");
669                 return 0;
670         }
671
672         pp->mpp = mpp;
673 rescan:
674         if (mpp) {
675                 if (pp->size && mpp->size != pp->size) {
676                         condlog(0, "%s: failed to add new path %s, "
677                                 "device size mismatch",
678                                 mpp->alias, pp->dev);
679                         int i = find_slot(vecs->pathvec, (void *)pp);
680                         if (i != -1)
681                                 vector_del_slot(vecs->pathvec, i);
682                         free_path(pp);
683                         return 1;
684                 }
685
686                 condlog(4,"%s: adopting all paths for path %s",
687                         mpp->alias, pp->dev);
688                 if (adopt_paths(vecs->pathvec, mpp))
689                         goto fail; /* leave path added to pathvec */
690
691                 verify_paths(mpp, vecs);
692                 mpp->flush_on_last_del = FLUSH_UNDEF;
693                 mpp->action = ACT_RELOAD;
694         } else {
695                 if (!should_multipath(pp, vecs->pathvec)) {
696                         orphan_path(pp, "only one path");
697                         return 0;
698                 }
699                 condlog(4,"%s: creating new map", pp->dev);
700                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
701                         mpp->action = ACT_CREATE;
702                         /*
703                          * We don't depend on ACT_CREATE, as domap will
704                          * set it to ACT_NOTHING when complete.
705                          */
706                         start_waiter = 1;
707                 }
708                 if (!start_waiter)
709                         goto fail; /* leave path added to pathvec */
710         }
711
712         /* persistent reservation check */
713         mpath_pr_event_handle(pp);
714
715         /*
716          * push the map to the device-mapper
717          */
718         if (setup_map(mpp, params, PARAMS_SIZE)) {
719                 condlog(0, "%s: failed to setup map for addition of new "
720                         "path %s", mpp->alias, pp->dev);
721                 goto fail_map;
722         }
723         /*
724          * reload the map for the multipath mapped device
725          */
726 retry:
727         ret = domap(mpp, params, 1);
728         if (ret <= 0) {
729                 if (ret < 0 && retries-- > 0) {
730                         condlog(0, "%s: retry domap for addition of new "
731                                 "path %s", mpp->alias, pp->dev);
732                         sleep(1);
733                         goto retry;
734                 }
735                 condlog(0, "%s: failed in domap for addition of new "
736                         "path %s", mpp->alias, pp->dev);
737                 /*
738                  * deal with asynchronous uevents :((
739                  */
740                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
741                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
742                         sleep(1);
743                         update_mpp_paths(mpp, vecs->pathvec);
744                         goto rescan;
745                 }
746                 else if (mpp->action == ACT_RELOAD)
747                         condlog(0, "%s: giving up reload", mpp->alias);
748                 else
749                         goto fail_map;
750         }
751         dm_lib_release();
752
753         /*
754          * update our state from kernel regardless of create or reload
755          */
756         if (setup_multipath(vecs, mpp))
757                 goto fail; /* if setup_multipath fails, it removes the map */
758
759         sync_map_state(mpp);
760
761         if ((mpp->action == ACT_CREATE ||
762              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
763             start_waiter_thread(mpp, vecs))
764                         goto fail_map;
765
766         if (retries >= 0) {
767                 condlog(2, "%s [%s]: path added to devmap %s",
768                         pp->dev, pp->dev_t, mpp->alias);
769                 return 0;
770         } else
771                 goto fail;
772
773 fail_map:
774         remove_map(mpp, vecs, 1);
775 fail:
776         orphan_path(pp, "failed to add path");
777         return 1;
778 }
779
780 static int
781 uev_remove_path (struct uevent *uev, struct vectors * vecs)
782 {
783         struct path *pp;
784         int ret;
785
786         condlog(2, "%s: remove path (uevent)", uev->kernel);
787         pthread_cleanup_push(cleanup_lock, &vecs->lock);
788         lock(vecs->lock);
789         pthread_testcancel();
790         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
791         if (pp)
792                 ret = ev_remove_path(pp, vecs);
793         lock_cleanup_pop(vecs->lock);
794         if (!pp) {
795                 /* Not an error; path might have been purged earlier */
796                 condlog(0, "%s: path already removed", uev->kernel);
797                 return 0;
798         }
799         return ret;
800 }
801
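/*
 * Remove a path from its map.  If it was the last path, try to flush the
 * whole map (disabling queueing first if flush_on_last_del is set);
 * otherwise reload the map's table without the path.
 */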
802 int
803 ev_remove_path (struct path *pp, struct vectors * vecs)
804 {
805         struct multipath * mpp;
806         int i, retval = 0;
807         char params[PARAMS_SIZE] = {0};
808
809         /*
810          * avoid referring to the map of an orphaned path
811          */
812         if ((mpp = pp->mpp)) {
813                 /*
814                  * transform the mp->pg vector of vectors of paths
815                  * into a mp->params string to feed the device-mapper
816                  */
817                 if (update_mpp_paths(mpp, vecs->pathvec)) {
818                         condlog(0, "%s: failed to update paths",
819                                 mpp->alias);
820                         goto fail;
821                 }
822                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
823                         vector_del_slot(mpp->paths, i);
824
825                 /*
826                  * remove the map IFF removing the last path
827                  */
828                 if (VECTOR_SIZE(mpp->paths) == 0) {
829                         char alias[WWID_SIZE];
830
831                         /*
832                          * flush_map will fail if the device is open
833                          */
834                         strlcpy(alias, mpp->alias, WWID_SIZE);
835                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
836                                 condlog(2, "%s: Last path deleted, disabling queueing", mpp->alias);
837                                 mpp->retry_tick = 0;
838                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
839                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
840                                 dm_queue_if_no_path(mpp->alias, 0);
841                         }
842                         if (!flush_map(mpp, vecs, 1)) {
843                                 condlog(2, "%s: removed map after"
844                                         " removing all paths",
845                                         alias);
846                                 retval = 0;
847                                 goto out;
848                         }
849                         /*
850                          * Not an error, continue
851                          */
852                 }
853
854                 if (setup_map(mpp, params, PARAMS_SIZE)) {
855                         condlog(0, "%s: failed to setup map for"
856                                 " removal of path %s", mpp->alias, pp->dev);
857                         goto fail;
858                 }
859
860                 if (mpp->wait_for_udev) {
861                         mpp->wait_for_udev = 2;
862                         goto out;
863                 }
864
865                 /*
866                  * reload the map
867                  */
868                 mpp->action = ACT_RELOAD;
869                 if (domap(mpp, params, 1) <= 0) {
870                         condlog(0, "%s: failed in domap for "
871                                 "removal of path %s",
872                                 mpp->alias, pp->dev);
873                         retval = 1;
874                 } else {
875                         /*
876                          * update our state from kernel
877                          */
878                         if (setup_multipath(vecs, mpp))
879                                 return 1;
880                         sync_map_state(mpp);
881
882                         condlog(2, "%s [%s]: path removed from map %s",
883                                 pp->dev, pp->dev_t, mpp->alias);
884                 }
885         }
886
887 out:
888         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
889                 vector_del_slot(vecs->pathvec, i);
890
891         free_path(pp);
892
893         return retval;
894
895 fail:
896         remove_map_and_stop_waiter(mpp, vecs, 1);
897         return 1;
898 }
899
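/*
 * uevent handler for block device "change" events.  Currently only the
 * read-only attribute is acted upon: the map is reloaded so the kernel
 * picks up the new write-protect setting.  A path that had requested a
 * udev re-trigger is re-added via uev_add_path() instead.
 */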
900 static int
901 uev_update_path (struct uevent *uev, struct vectors * vecs)
902 {
903         int ro, retval = 0;
904
905         ro = uevent_get_disk_ro(uev);
906
907         if (ro >= 0) {
908                 struct path * pp;
909                 struct multipath *mpp = NULL;
910
911                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
912                         uev->kernel, ro);
913                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
914                 lock(vecs->lock);
915                 pthread_testcancel();
916                 /*
917                  * pthread_mutex_lock() and pthread_mutex_unlock()
918                  * need to be at the same indentation level, hence
919                  * this slightly convoluted codepath.
920                  */
921                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
922                 if (pp) {
923                         if (pp->initialized == INIT_REQUESTED_UDEV) {
924                                 retval = 2;
925                         } else {
926                                 mpp = pp->mpp;
927                                 if (mpp && mpp->wait_for_udev) {
928                                         mpp->wait_for_udev = 2;
929                                         mpp = NULL;
930                                         retval = 0;
931                                 }
932                         }
933                         if (mpp) {
934                                 retval = reload_map(vecs, mpp, 0, 1);
935
936                                 condlog(2, "%s: map %s reloaded (retval %d)",
937                                         uev->kernel, mpp->alias, retval);
938                         }
939                 }
940                 lock_cleanup_pop(vecs->lock);
941                 if (!pp) {
942                         condlog(0, "%s: spurious uevent, path not found",
943                                 uev->kernel);
944                         return 1;
945                 }
946                 if (retval == 2)
947                         return uev_add_path(uev, vecs);
948         }
949
950         return retval;
951 }
952
953 static int
954 map_discovery (struct vectors * vecs)
955 {
956         struct multipath * mpp;
957         unsigned int i;
958
959         if (dm_get_maps(vecs->mpvec))
960                 return 1;
961
962         vector_foreach_slot (vecs->mpvec, mpp, i)
963                 if (setup_multipath(vecs, mpp))
964                         return 1;
965
966         return 0;
967 }
968
969 int
970 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
971 {
972         struct vectors * vecs;
973         int r;
974
975         *reply = NULL;
976         *len = 0;
977         vecs = (struct vectors *)trigger_data;
978
979         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
980
981         if (r > 0) {
982                 if (r == ETIMEDOUT)
983                         *reply = STRDUP("timeout\n");
984                 else
985                         *reply = STRDUP("fail\n");
986                 *len = strlen(*reply) + 1;
987                 r = 1;
988         }
989         else if (!r && *len == 0) {
990                 *reply = STRDUP("ok\n");
991                 *len = strlen(*reply) + 1;
992                 r = 0;
993         }
994         /* else if (r < 0) leave *reply alone */
995
996         return r;
997 }
998
999 static int
1000 uev_discard(char * devpath)
1001 {
1002         char *tmp;
1003         char a[11], b[11];
1004
1005         /*
1006          * keep only block devices, discard partitions
1007          */
1008         tmp = strstr(devpath, "/block/");
1009         if (tmp == NULL){
1010                 condlog(4, "no /block/ in '%s'", devpath);
1011                 return 1;
1012         }
1013         if (sscanf(tmp, "/block/%10s", a) != 1 ||
1014             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
1015                 condlog(4, "discard event on %s", devpath);
1016                 return 1;
1017         }
1018         return 0;
1019 }
1020
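/*
 * Central uevent dispatcher: wait until the daemon is idle or running,
 * then route "dm-*" events to the map handlers and all other block
 * device events to the path handlers.
 */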
1021 int
1022 uev_trigger (struct uevent * uev, void * trigger_data)
1023 {
1024         int r = 0;
1025         struct vectors * vecs;
1026
1027         vecs = (struct vectors *)trigger_data;
1028
1029         if (uev_discard(uev->devpath))
1030                 return 0;
1031
1032         pthread_cleanup_push(config_cleanup, NULL);
1033         pthread_mutex_lock(&config_lock);
1034         if (running_state != DAEMON_IDLE &&
1035             running_state != DAEMON_RUNNING)
1036                 pthread_cond_wait(&config_cond, &config_lock);
1037         pthread_cleanup_pop(1);
1038
1039         if (running_state == DAEMON_SHUTDOWN)
1040                 return 0;
1041
1042         /*
1043          * device map event
1044          * Add events are ignored here as the tables
1045          * are not fully initialised then.
1046          */
1047         if (!strncmp(uev->kernel, "dm-", 3)) {
1048                 if (!strncmp(uev->action, "change", 6)) {
1049                         r = uev_add_map(uev, vecs);
1050                         goto out;
1051                 }
1052                 if (!strncmp(uev->action, "remove", 6)) {
1053                         r = uev_remove_map(uev, vecs);
1054                         goto out;
1055                 }
1056                 goto out;
1057         }
1058
1059         /*
1060          * path add/remove event
1061          */
1062         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
1063                            uev->kernel) > 0)
1064                 goto out;
1065
1066         if (!strncmp(uev->action, "add", 3)) {
1067                 r = uev_add_path(uev, vecs);
1068                 goto out;
1069         }
1070         if (!strncmp(uev->action, "remove", 6)) {
1071                 r = uev_remove_path(uev, vecs);
1072                 goto out;
1073         }
1074         if (!strncmp(uev->action, "change", 6)) {
1075                 r = uev_update_path(uev, vecs);
1076                 goto out;
1077         }
1078
1079 out:
1080         return r;
1081 }
1082
1083 static void *
1084 ueventloop (void * ap)
1085 {
1086         struct udev *udev = ap;
1087
1088         if (uevent_listen(udev))
1089                 condlog(0, "error starting uevent listener");
1090
1091         return NULL;
1092 }
1093
1094 static void *
1095 uevqloop (void * ap)
1096 {
1097         if (uevent_dispatch(&uev_trigger, ap))
1098                 condlog(0, "error starting uevent dispatcher");
1099
1100         return NULL;
1101 }
1102 static void *
1103 uxlsnrloop (void * ap)
1104 {
1105         if (cli_init()) {
1106                 condlog(1, "Failed to init uxsock listener");
1107                 return NULL;
1108         }
1109
1110         set_handler_callback(LIST+PATHS, cli_list_paths);
1111         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1112         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1113         set_handler_callback(LIST+PATH, cli_list_path);
1114         set_handler_callback(LIST+MAPS, cli_list_maps);
1115         set_unlocked_handler_callback(LIST+STATUS, cli_list_status);
1116         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1117         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1118         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1119         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1120         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1121         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1122         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1123         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1124         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1125         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1126         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1127         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1128         set_handler_callback(LIST+CONFIG, cli_list_config);
1129         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1130         set_handler_callback(LIST+DEVICES, cli_list_devices);
1131         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1132         set_handler_callback(ADD+PATH, cli_add_path);
1133         set_handler_callback(DEL+PATH, cli_del_path);
1134         set_handler_callback(ADD+MAP, cli_add_map);
1135         set_handler_callback(DEL+MAP, cli_del_map);
1136         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1137         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1138         set_handler_callback(SUSPEND+MAP, cli_suspend);
1139         set_handler_callback(RESUME+MAP, cli_resume);
1140         set_handler_callback(RESIZE+MAP, cli_resize);
1141         set_handler_callback(RELOAD+MAP, cli_reload);
1142         set_handler_callback(RESET+MAP, cli_reassign);
1143         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1144         set_handler_callback(FAIL+PATH, cli_fail);
1145         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1146         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1147         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1148         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1149         set_unlocked_handler_callback(QUIT, cli_quit);
1150         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1151         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1152         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1153         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1154         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1155         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1156
1157         umask(077);
1158         uxsock_listen(&uxsock_trigger, ap);
1159
1160         return NULL;
1161 }
1162
1163 void
1164 exit_daemon (void)
1165 {
1166         post_config_state(DAEMON_SHUTDOWN);
1167 }
1168
1169 static void
1170 fail_path (struct path * pp, int del_active)
1171 {
1172         if (!pp->mpp)
1173                 return;
1174
1175         condlog(2, "checker failed path %s in map %s",
1176                  pp->dev_t, pp->mpp->alias);
1177
1178         dm_fail_path(pp->mpp->alias, pp->dev_t);
1179         if (del_active)
1180                 update_queue_mode_del_path(pp->mpp);
1181 }
1182
1183 /*
1184  * caller must have locked the path list before calling that function
1185  */
1186 static int
1187 reinstate_path (struct path * pp, int add_active)
1188 {
1189         int ret = 0;
1190
1191         if (!pp->mpp)
1192                 return 0;
1193
1194         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1195                 condlog(0, "%s: reinstate failed", pp->dev_t);
1196                 ret = 1;
1197         } else {
1198                 condlog(2, "%s: reinstated", pp->dev_t);
1199                 if (add_active)
1200                         update_queue_mode_add_path(pp->mpp);
1201         }
1202         return ret;
1203 }
1204
1205 static void
1206 enable_group(struct path * pp)
1207 {
1208         struct pathgroup * pgp;
1209
1210         /*
1211          * if path is added through uev_add_path, pgindex can be unset.
1212          * the next update_multipath_strings() call (upon map reload) will set it.
1213          *
1214          * we can safely return here, because upon map reload, all
1215          * PG will be enabled.
1216          */
1217         if (!pp->mpp->pg || !pp->pgindex)
1218                 return;
1219
1220         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1221
1222         if (pgp->status == PGSTATE_DISABLED) {
1223                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1224                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1225         }
1226 }
1227
1228 static void
1229 mpvec_garbage_collector (struct vectors * vecs)
1230 {
1231         struct multipath * mpp;
1232         unsigned int i;
1233
1234         if (!vecs->mpvec)
1235                 return;
1236
1237         vector_foreach_slot (vecs->mpvec, mpp, i) {
1238                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1239                         condlog(2, "%s: remove dead map", mpp->alias);
1240                         remove_map_and_stop_waiter(mpp, vecs, 1);
1241                         i--;
1242                 }
1243         }
1244 }
1245
1246 /* This is called after a path has started working again. If the multipath
1247  * device for this path uses the followover failback type, and this is the
1248  * best pathgroup, and this is the first path in the pathgroup to come back
1249  * up, then switch to this pathgroup */
1250 static int
1251 followover_should_failback(struct path * pp)
1252 {
1253         struct pathgroup * pgp;
1254         struct path *pp1;
1255         int i;
1256
1257         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1258             !pp->mpp->pg || !pp->pgindex ||
1259             pp->pgindex != pp->mpp->bestpg)
1260                 return 0;
1261
1262         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1263         vector_foreach_slot(pgp->paths, pp1, i) {
1264                 if (pp1 == pp)
1265                         continue;
1266                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1267                         return 0;
1268         }
1269         return 1;
1270 }
1271
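/*
 * Periodic tick: give up on maps whose creation uevent never arrived
 * once their uev_wait_tick counter expires, re-enable reloads for them
 * and, if configured, trigger a delayed reconfigure.
 */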
1272 static void
1273 missing_uev_wait_tick(struct vectors *vecs)
1274 {
1275         struct multipath * mpp;
1276         unsigned int i;
1277         int timed_out = 0;
1278
1279         vector_foreach_slot (vecs->mpvec, mpp, i) {
1280                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1281                         timed_out = 1;
1282                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1283                         if (mpp->wait_for_udev > 1 && update_map(mpp, vecs)) {
1284                                 /* update_map removed map */
1285                                 i--;
1286                                 continue;
1287                         }
1288                         mpp->wait_for_udev = 0;
1289                 }
1290         }
1291
1292         if (timed_out && conf->delayed_reconfig &&
1293             !need_to_delay_reconfig(vecs)) {
1294                 condlog(2, "reconfigure (delayed)");
1295                 set_config_state(DAEMON_CONFIGURE);
1296         }
1297 }
1298
1299 static void
1300 defered_failback_tick (vector mpvec)
1301 {
1302         struct multipath * mpp;
1303         unsigned int i;
1304
1305         vector_foreach_slot (mpvec, mpp, i) {
1306                 /*
1307                  * deferred failback counting down
1308                  */
1309                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1310                         mpp->failback_tick--;
1311
1312                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1313                                 switch_pathgroup(mpp);
1314                 }
1315         }
1316 }
1317
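/*
 * Periodic countdown for maps that are queueing with no active path
 * (no_path_retry): once retry_tick reaches zero, turn off
 * queue_if_no_path.
 */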
1318 static void
1319 retry_count_tick(vector mpvec)
1320 {
1321         struct multipath *mpp;
1322         unsigned int i;
1323
1324         vector_foreach_slot (mpvec, mpp, i) {
1325                 if (mpp->retry_tick > 0) {
1326                         mpp->stat_total_queueing_time++;
1327                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1328                         if(--mpp->retry_tick == 0) {
1329                                 dm_queue_if_no_path(mpp->alias, 0);
1330                                 condlog(2, "%s: Disable queueing", mpp->alias);
1331                         }
1332                 }
1333         }
1334 }
1335
1336 int update_prio(struct path *pp, int refresh_all)
1337 {
1338         int oldpriority;
1339         struct path *pp1;
1340         struct pathgroup * pgp;
1341         int i, j, changed = 0;
1342
1343         if (refresh_all) {
1344                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1345                         vector_foreach_slot (pgp->paths, pp1, j) {
1346                                 oldpriority = pp1->priority;
1347                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1348                                 if (pp1->priority != oldpriority)
1349                                         changed = 1;
1350                         }
1351                 }
1352                 return changed;
1353         }
1354         oldpriority = pp->priority;
1355         pathinfo(pp, conf->hwtable, DI_PRIO);
1356
1357         if (pp->priority == oldpriority)
1358                 return 0;
1359         return 1;
1360 }
1361
1362 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1363 {
1364         if (reload_map(vecs, mpp, refresh, 1))
1365                 return 1;
1366
1367         dm_lib_release();
1368         if (setup_multipath(vecs, mpp) != 0)
1369                 return 1;
1370         sync_map_state(mpp);
1371
1372         return 0;
1373 }
1374
1375 /*
1376  * Returns '1' if the path has been checked, '0' otherwise
1377  */
1378 int
1379 check_path (struct vectors * vecs, struct path * pp, int ticks)
1380 {
1381         int newstate;
1382         int new_path_up = 0;
1383         int chkr_new_path_up = 0;
1384         int add_active;
1385         int disable_reinstate = 0;
1386         int oldchkrstate = pp->chkrstate;
1387
1388         if ((pp->initialized == INIT_OK ||
1389              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1390                 return 0;
1391
1392         if (pp->tick)
1393                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1394         if (pp->tick)
1395                 return 0; /* don't check this path yet */
1396
1397         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1398             pp->retriggers < conf->retrigger_tries) {
1399                 condlog(2, "%s: triggering change event to reinitialize",
1400                         pp->dev);
1401                 pp->initialized = INIT_REQUESTED_UDEV;
1402                 pp->retriggers++;
1403                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1404                                      strlen("change"));
1405                 return 0;
1406         }
1407
1408         /*
1409          * schedule the next check as soon as possible,
1410          * in case we exit abnormally from here
1411          */
1412         pp->tick = conf->checkint;
1413
1414         newstate = path_offline(pp);
1415         /*
1416          * Wait for uevent for removed paths;
1417          * some LLDDs like zfcp keep paths unavailable
1418          * without sending uevents.
1419          */
1420         if (newstate == PATH_REMOVED)
1421                 newstate = PATH_DOWN;
1422
1423         if (newstate == PATH_UP)
1424                 newstate = get_state(pp, 1);
1425         else
1426                 checker_clear_message(&pp->checker);
1427
1428         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1429                 condlog(2, "%s: unusable path", pp->dev);
1430                 pathinfo(pp, conf->hwtable, 0);
1431                 return 1;
1432         }
1433         if (!pp->mpp) {
1434                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1435                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1436                         condlog(2, "%s: add missing path", pp->dev);
1437                         if (pathinfo(pp, conf->hwtable, DI_ALL) == 0) {
1438                                 ev_add_path(pp, vecs);
1439                                 pp->tick = 1;
1440                         }
1441                 }
1442                 return 0;
1443         }
1444         /*
1445          * Async IO in flight. Keep the previous path state
1446          * and reschedule as soon as possible
1447          */
1448         if (newstate == PATH_PENDING) {
1449                 pp->tick = 1;
1450                 return 0;
1451         }
1452         /*
1453          * Synchronize with kernel state
1454          */
1455         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1456                 condlog(1, "%s: Could not synchronize with kernel state",
1457                         pp->dev);
1458                 pp->dmstate = PSTATE_UNDEF;
1459         }
1460         /* if update_multipath_strings orphaned the path, quit early */
1461         if (!pp->mpp)
1462                 return 0;
1463
1464         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1465              pp->wait_checks > 0) {
1466                 if (pp->mpp && pp->mpp->nr_active > 0) {
1467                         pp->state = PATH_DELAYED;
1468                         pp->wait_checks--;
1469                         return 1;
1470                 } else
1471                         pp->wait_checks = 0;
1472         }
1473
1474         /*
1475          * don't reinstate failed path, if its in stand-by
1476          * and if target supports only implicit tpgs mode.
1477          * this will prevent unnecessary i/o by dm on stand-by
1478          * paths if there are no other active paths in map.
1479          */
1480         disable_reinstate = (newstate == PATH_GHOST &&
1481                             pp->mpp->nr_active == 0 &&
1482                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1483
1484         pp->chkrstate = newstate;
1485         if (newstate != pp->state) {
1486                 int oldstate = pp->state;
1487                 pp->state = newstate;
1488
1489                 if (strlen(checker_message(&pp->checker)))
1490                         LOG_MSG(1, checker_message(&pp->checker));
1491
1492                 /*
1493                  * upon state change, reset the checkint
1494                  * to the shortest delay
1495                  */
1496                 pp->checkint = conf->checkint;
1497
1498                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1499                         /*
1500                          * proactively fail path in the DM
1501                          */
1502                         if (oldstate == PATH_UP ||
1503                             oldstate == PATH_GHOST) {
1504                                 fail_path(pp, 1);
1505                                 if (pp->mpp->delay_wait_checks > 0 &&
1506                                     pp->watch_checks > 0) {
1507                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1508                                         pp->watch_checks = 0;
1509                                 }
1510                         } else
1511                                 fail_path(pp, 0);
1512
1513                         /*
1514                          * cancel scheduled failback
1515                          */
1516                         pp->mpp->failback_tick = 0;
1517
1518                         pp->mpp->stat_path_failures++;
1519                         return 1;
1520                 }
1521
1522                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1523                         if (pp->mpp && pp->mpp->prflag) {
1524                                 /*
1525                                  * Check Persistent Reservation.
1526                                  */
1527                                 condlog(2, "%s: checking persistent "
1528                                         "reservation registration", pp->dev);
1529                                 mpath_pr_event_handle(pp);
1530                         }
1531                 }
1532
1533                 /*
1534                  * reinstate this path
1535                  */
1536                 if (oldstate != PATH_UP &&
1537                     oldstate != PATH_GHOST) {
1538                         if (pp->mpp->delay_watch_checks > 0)
1539                                 pp->watch_checks = pp->mpp->delay_watch_checks;
1540                         add_active = 1;
1541                 } else {
1542                         if (pp->watch_checks > 0)
1543                                 pp->watch_checks--;
1544                         add_active = 0;
1545                 }
1546                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1547                         condlog(3, "%s: reload map", pp->dev);
1548                         ev_add_path(pp, vecs);
1549                         pp->tick = 1;
1550                         return 0;
1551                 }
1552                 new_path_up = 1;
1553
1554                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1555                         chkr_new_path_up = 1;
1556
1557                 /*
1558                  * if at least one path is up in a group, and
1559                  * the group is disabled, re-enable it
1560                  */
1561                 if (newstate == PATH_UP)
1562                         enable_group(pp);
1563         }
1564         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1565                 if ((pp->dmstate == PSTATE_FAILED ||
1566                     pp->dmstate == PSTATE_UNDEF) &&
1567                     !disable_reinstate) {
1568                         /* Clear IO errors */
1569                         if (reinstate_path(pp, 0)) {
1570                                 condlog(3, "%s: reload map", pp->dev);
1571                                 ev_add_path(pp, vecs);
1572                                 pp->tick = 1;
1573                                 return 0;
1574                         }
1575                 } else {
1576                         LOG_MSG(4, checker_message(&pp->checker));
1577                         if (pp->checkint != conf->max_checkint) {
1578                                 /*
1579                                  * double the delay until the next
1580                                  * check, capped at conf->max_checkint
1581                                  */
1582                                 if (pp->checkint < (conf->max_checkint / 2))
1583                                         pp->checkint = 2 * pp->checkint;
1584                                 else
1585                                         pp->checkint = conf->max_checkint;
1586
1587                                 condlog(4, "%s: delay next check %is",
1588                                         pp->dev_t, pp->checkint);
1589                         }
1590                         if (pp->watch_checks > 0)
1591                                 pp->watch_checks--;
1592                         pp->tick = pp->checkint;
1593                 }
1594         }
1595         else if (newstate == PATH_DOWN &&
1596                  strlen(checker_message(&pp->checker))) {
1597                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1598                         LOG_MSG(3, checker_message(&pp->checker));
1599                 else
1600                         LOG_MSG(2, checker_message(&pp->checker));
1601         }
1602
1603         pp->state = newstate;
1604
1605
1606         if (pp->mpp->wait_for_udev)
1607                 return 1;
1608         /*
1609          * path prio refreshing
1610          */
1611         condlog(4, "path prio refresh");
1612
1613         if (update_prio(pp, new_path_up) &&
1614             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1615              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1616                 update_path_groups(pp->mpp, vecs, !new_path_up);
1617         else if (need_switch_pathgroup(pp->mpp, 0)) {
1618                 if (pp->mpp->pgfailback > 0 &&
1619                     (new_path_up || pp->mpp->failback_tick <= 0))
1620                         pp->mpp->failback_tick =
1621                                 pp->mpp->pgfailback + 1;
1622                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1623                          (chkr_new_path_up && followover_should_failback(pp)))
1624                         switch_pathgroup(pp->mpp);
1625         }
1626         return 1;
1627 }
1628
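/*
 * Main path checker thread.  Once per tick it notifies the systemd
 * watchdog (if enabled), waits for the daemon to reach the RUNNING
 * state, walks vecs->pathvec under the vecs lock calling check_path()
 * for every path, runs the deferred failback, retry count and missing
 * uevent timers on vecs->mpvec, and periodically garbage-collects
 * orphaned maps.  With strict_timing set it sleeps on SIGALRM via
 * setitimer()/sigwait() so that iterations start on one-second
 * boundaries; otherwise it simply sleeps for one second.
 */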
1629 static void *
1630 checkerloop (void *ap)
1631 {
1632         struct vectors *vecs;
1633         struct path *pp;
1634         int count = 0;
1635         unsigned int i;
1636         struct itimerval timer_tick_it;
1637         struct timeval last_time;
1638
1639         mlockall(MCL_CURRENT | MCL_FUTURE);
1640         vecs = (struct vectors *)ap;
1641         condlog(2, "path checkers start up");
1642
1643         /*
1644          * init the path check interval
1645          */
1646         vector_foreach_slot (vecs->pathvec, pp, i) {
1647                 pp->checkint = conf->checkint;
1648         }
1649
1650         /* Tweak start time for initial path check */
1651         if (gettimeofday(&last_time, NULL) != 0)
1652                 last_time.tv_sec = 0;
1653         else
1654                 last_time.tv_sec -= 1;
1655
1656         while (1) {
1657                 struct timeval diff_time, start_time, end_time;
1658                 int num_paths = 0, ticks = 0, signo, strict_timing, rc = 0;
1659                 sigset_t mask;
1660
1661                 if (gettimeofday(&start_time, NULL) != 0)
1662                         start_time.tv_sec = 0;
1663                 if (start_time.tv_sec && last_time.tv_sec) {
1664                         timersub(&start_time, &last_time, &diff_time);
1665                         condlog(4, "tick (%lu.%06lu secs)",
1666                                 diff_time.tv_sec, diff_time.tv_usec);
1667                         last_time.tv_sec = start_time.tv_sec;
1668                         last_time.tv_usec = start_time.tv_usec;
1669                         ticks = diff_time.tv_sec;
1670                 } else {
1671                         ticks = 1;
1672                         condlog(4, "tick (%d ticks)", ticks);
1673                 }
1674 #ifdef USE_SYSTEMD
1675                 if (use_watchdog)
1676                         sd_notify(0, "WATCHDOG=1");
1677 #endif
1678                 rc = set_config_state(DAEMON_RUNNING);
1679                 if (rc == ETIMEDOUT) {
1680                         condlog(4, "timeout waiting for DAEMON_IDLE");
1681                         continue;
1682                 }
1683                 strict_timing = conf->strict_timing;
1684                 if (vecs->pathvec) {
1685                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1686                         lock(vecs->lock);
1687                         pthread_testcancel();
1688                         vector_foreach_slot (vecs->pathvec, pp, i) {
1689                                 num_paths += check_path(vecs, pp, ticks);
1690                         }
1691                         lock_cleanup_pop(vecs->lock);
1692                 }
1693                 if (vecs->mpvec) {
1694                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1695                         lock(vecs->lock);
1696                         pthread_testcancel();
1697                         defered_failback_tick(vecs->mpvec);
1698                         retry_count_tick(vecs->mpvec);
1699                         missing_uev_wait_tick(vecs);
1700                         lock_cleanup_pop(vecs->lock);
1701                 }
1702                 if (count)
1703                         count--;
1704                 else {
1705                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1706                         lock(vecs->lock);
1707                         pthread_testcancel();
1708                         condlog(4, "map garbage collection");
1709                         mpvec_garbage_collector(vecs);
1710                         count = MAPGCINT;
1711                         lock_cleanup_pop(vecs->lock);
1712                 }
1713
1714                 diff_time.tv_usec = 0;
1715                 if (start_time.tv_sec &&
1716                     gettimeofday(&end_time, NULL) == 0) {
1717                         timersub(&end_time, &start_time, &diff_time);
1718                         if (num_paths) {
1719                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
1720                                         num_paths, num_paths > 1 ? "s" : "",
1721                                         diff_time.tv_sec, diff_time.tv_usec);
1722                                 if (diff_time.tv_sec > conf->max_checkint)
1723                                         condlog(1, "path checkers took longer "
1724                                                 "than %lu seconds, consider "
1725                                                 "increasing max_polling_interval",
1726                                                 diff_time.tv_sec);
1727                         }
1728                 }
1729
1730                 post_config_state(DAEMON_IDLE);
1731                 if (!strict_timing)
1732                         sleep(1);
1733                 else {
1734                         timer_tick_it.it_interval.tv_sec = 0;
1735                         timer_tick_it.it_interval.tv_usec = 0;
1736                         if (diff_time.tv_usec) {
1737                                 timer_tick_it.it_value.tv_sec = 0;
1738                                 timer_tick_it.it_value.tv_usec =
1739                                         (unsigned long)1000000 - diff_time.tv_usec;
1740                         } else {
1741                                 timer_tick_it.it_value.tv_sec = 1;
1742                                 timer_tick_it.it_value.tv_usec = 0;
1743                         }
1744                         setitimer(ITIMER_REAL, &timer_tick_it, NULL);
1745
1746                         sigemptyset(&mask);
1747                         sigaddset(&mask, SIGALRM);
1748                         condlog(3, "waiting for %lu.%06lu secs",
1749                                 timer_tick_it.it_value.tv_sec,
1750                                 timer_tick_it.it_value.tv_usec);
1751                         if (sigwait(&mask, &signo) != 0) {
1752                                 condlog(3, "sigwait failed with error %d",
1753                                         errno);
1754                                 conf->strict_timing = 0;
1755                                 break;
1756                         }
1757                 }
1758         }
1759         return NULL;
1760 }
1761
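/*
 * (Re)build the path and map vectors from scratch: discover paths from
 * sysfs, drop blacklisted ones, discover existing maps from device-mapper,
 * coalesce paths into multipath maps and push changed maps into the
 * kernel, drop maps that are no longer wanted, remember the WWIDs and
 * update persistent reservations, and optionally start a dm event waiter
 * thread per map.  Returns 0 on success, 1 on any failure.
 */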
1762 int
1763 configure (struct vectors * vecs, int start_waiters)
1764 {
1765         struct multipath * mpp;
1766         struct path * pp;
1767         vector mpvec;
1768         int i, ret;
1769
1770         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1771                 return 1;
1772
1773         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1774                 return 1;
1775
1776         if (!(mpvec = vector_alloc()))
1777                 return 1;
1778
1779         /*
1780          * probe for current path (from sysfs) and map (from dm) sets
1781          */
1782         ret = path_discovery(vecs->pathvec, conf, DI_ALL);
1783         if (ret < 0)
1784                 return 1;
1785
1786         vector_foreach_slot (vecs->pathvec, pp, i){
1787                 if (filter_path(conf, pp) > 0){
1788                         vector_del_slot(vecs->pathvec, i);
1789                         free_path(pp);
1790                         i--;
1791                 }
1792                 else
1793                         pp->checkint = conf->checkint;
1794         }
1795         if (map_discovery(vecs))
1796                 return 1;
1797
1798         /*
1799          * create new set of maps & push changed ones into dm
1800          */
1801         if (coalesce_paths(vecs, mpvec, NULL, 1, 1))
1802                 return 1;
1803
1804         /*
1805          * may need to remove some maps which are no longer relevant
1806          * e.g., due to blacklist changes in conf file
1807          */
1808         if (coalesce_maps(vecs, mpvec))
1809                 return 1;
1810
1811         dm_lib_release();
1812
1813         sync_maps_state(mpvec);
1814         vector_foreach_slot(mpvec, mpp, i){
1815                 remember_wwid(mpp->wwid);
1816                 update_map_pr(mpp);
1817         }
1818
1819         /*
1820          * purge dm of old maps
1821          */
1822         remove_maps(vecs);
1823
1824         /*
1825          * save new set of maps formed by considering current path state
1826          */
1827         vector_free(vecs->mpvec);
1828         vecs->mpvec = mpvec;
1829
1830         /*
1831          * start dm event waiter threads for these new maps
1832          */
1833         vector_foreach_slot(vecs->mpvec, mpp, i) {
1834                 if (setup_multipath(vecs, mpp))
1835                         return 1;
1836                 if (start_waiters)
1837                         if (start_waiter_thread(mpp, vecs))
1838                                 return 1;
1839         }
1840         return 0;
1841 }
1842
1843 int
1844 need_to_delay_reconfig(struct vectors * vecs)
1845 {
1846         struct multipath *mpp;
1847         int i;
1848
1849         if (!VECTOR_SIZE(vecs->mpvec))
1850                 return 0;
1851
1852         vector_foreach_slot(vecs->mpvec, mpp, i) {
1853                 if (mpp->wait_for_udev)
1854                         return 1;
1855         }
1856         return 0;
1857 }
1858
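/*
 * Reload DEFAULT_CONFIGFILE and rebuild all state.  The old map and path
 * vectors are torn down first, then the new config inherits verbosity,
 * bindings_read_only and ignore_new_devs from the previous one before
 * configure() is rerun.  If the new config cannot be loaded, the old one
 * is kept and 1 is returned; 0 is returned on success.
 */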
1859 int
1860 reconfigure (struct vectors * vecs)
1861 {
1862         struct config * old = conf;
1863         int retval = 1;
1864
1865         /*
1866          * free old map and path vectors ... they use old conf state
1867          */
1868         if (VECTOR_SIZE(vecs->mpvec))
1869                 remove_maps_and_stop_waiters(vecs);
1870
1871         if (VECTOR_SIZE(vecs->pathvec))
1872                 free_pathvec(vecs->pathvec, FREE_PATHS);
1873
1874         vecs->pathvec = NULL;
1875         conf = NULL;
1876
1877         /* Re-read any timezone changes */
1878         tzset();
1879
1880         if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1881                 dm_drv_version(conf->version, TGT_MPATH);
1882                 conf->verbosity = old->verbosity;
1883                 conf->bindings_read_only = old->bindings_read_only;
1884                 conf->ignore_new_devs = old->ignore_new_devs;
1885                 configure(vecs, 1);
1886                 free_config(old);
1887                 retval = 0;
1888         } else {
1889                 conf = old;
1890         }
1891         uxsock_timeout = conf->uxsock_timeout;
1892
1893         return retval;
1894 }
1895
1896 static struct vectors *
1897 init_vecs (void)
1898 {
1899         struct vectors * vecs;
1900
1901         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1902
1903         if (!vecs)
1904                 return NULL;
1905
1906         vecs->lock.mutex =
1907                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1908
1909         if (!vecs->lock.mutex)
1910                 goto out;
1911
1912         pthread_mutex_init(vecs->lock.mutex, NULL);
1913         vecs->lock.depth = 0;
1914
1915         return vecs;
1916
1917 out:
1918         FREE(vecs);
1919         condlog(0, "failed to init paths");
1920         return NULL;
1921 }
1922
1923 static void *
1924 signal_set(int signo, void (*func) (int))
1925 {
1926         int r;
1927         struct sigaction sig;
1928         struct sigaction osig;
1929
1930         sig.sa_handler = func;
1931         sigemptyset(&sig.sa_mask);
1932         sig.sa_flags = 0;
1933
1934         r = sigaction(signo, &sig, &osig);
1935
1936         if (r < 0)
1937                 return (SIG_ERR);
1938         else
1939                 return (osig.sa_handler);
1940 }
1941
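/*
 * Signals are not acted upon inside the handlers themselves: sighup()
 * and sigusr1() only raise the reconfig_sig and log_reset_sig flags,
 * which handle_signals() later turns into a reconfigure request or a
 * log queue reset.
 */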
1942 void
1943 handle_signals(void)
1944 {
1945         if (reconfig_sig) {
1946                 condlog(2, "reconfigure (signal)");
1947                 set_config_state(DAEMON_CONFIGURE);
1948         }
1949         if (log_reset_sig) {
1950                 condlog(2, "reset log (signal)");
1951                 pthread_mutex_lock(&logq_lock);
1952                 log_reset("multipathd");
1953                 pthread_mutex_unlock(&logq_lock);
1954         }
1955         reconfig_sig = 0;
1956         log_reset_sig = 0;
1957 }
1958
1959 static void
1960 sighup (int sig)
1961 {
1962         reconfig_sig = 1;
1963 }
1964
1965 static void
1966 sigend (int sig)
1967 {
1968         exit_daemon();
1969 }
1970
1971 static void
1972 sigusr1 (int sig)
1973 {
1974         log_reset_sig = 1;
1975 }
1976
1977 static void
1978 sigusr2 (int sig)
1979 {
1980         condlog(3, "SIGUSR2 received");
1981 }
1982
1983 static void
1984 signal_init(void)
1985 {
1986         sigset_t set;
1987
1988         sigemptyset(&set);
1989         sigaddset(&set, SIGHUP);
1990         sigaddset(&set, SIGUSR1);
1991         sigaddset(&set, SIGUSR2);
1992         sigaddset(&set, SIGALRM);
1993         pthread_sigmask(SIG_BLOCK, &set, NULL);
1994
1995         signal_set(SIGHUP, sighup);
1996         signal_set(SIGUSR1, sigusr1);
1997         signal_set(SIGUSR2, sigusr2);
1998         signal_set(SIGINT, sigend);
1999         signal_set(SIGTERM, sigend);
2000         signal(SIGPIPE, SIG_IGN);
2001 }
2002
2003 static void
2004 setscheduler (void)
2005 {
2006         int res;
2007         static struct sched_param sched_param = {
2008                 .sched_priority = 99
2009         };
2010
2011         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2012
2013         if (res == -1)
2014                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2015         return;
2016 }
2017
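/*
 * Make the daemon as unattractive as possible to the kernel OOM killer.
 * If systemd already provided an OOMScoreAdjust setting this is a no-op;
 * otherwise the minimum score is written to /proc/self/oom_score_adj,
 * falling back to the legacy /proc/self/oom_adj interface when the
 * former does not exist.
 */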
2018 static void
2019 set_oom_adj (void)
2020 {
2021 #ifdef OOM_SCORE_ADJ_MIN
2022         int retry = 1;
2023         char *file = "/proc/self/oom_score_adj";
2024         int score = OOM_SCORE_ADJ_MIN;
2025 #else
2026         int retry = 0;
2027         char *file = "/proc/self/oom_adj";
2028         int score = OOM_ADJUST_MIN;
2029 #endif
2030         FILE *fp;
2031         struct stat st;
2032         char *envp;
2033
2034         envp = getenv("OOMScoreAdjust");
2035         if (envp) {
2036                 condlog(3, "Using systemd provided OOMScoreAdjust");
2037                 return;
2038         }
2039         do {
2040                 if (stat(file, &st) == 0){
2041                         fp = fopen(file, "w");
2042                         if (!fp) {
2043                                 condlog(0, "couldn't fopen %s : %s", file,
2044                                         strerror(errno));
2045                                 return;
2046                         }
2047                         fprintf(fp, "%i", score);
2048                         fclose(fp);
2049                         return;
2050                 }
2051                 if (errno != ENOENT) {
2052                         condlog(0, "couldn't stat %s : %s", file,
2053                                 strerror(errno));
2054                         return;
2055                 }
2056 #ifdef OOM_ADJUST_MIN
2057                 file = "/proc/self/oom_adj";
2058                 score = OOM_ADJUST_MIN;
2059 #else
2060                 retry = 0;
2061 #endif
2062         } while (retry--);
2063         condlog(0, "couldn't adjust oom score");
2064 }
2065
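/*
 * Body of the daemon proper.  Locks memory, installs the signal
 * handlers, writes the pidfile, loads the configuration, initializes
 * device-mapper, checkers and prioritizers, applies the fd limit,
 * scheduling and OOM settings, then spawns the uevent listener, CLI
 * listener, path checker and uevent dispatcher threads.  The main loop
 * waits on config_cond and runs reconfigure() whenever the daemon state
 * is set to DAEMON_CONFIGURE, until DAEMON_SHUTDOWN triggers the
 * tear-down sequence at the bottom of the function.
 */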
2066 static int
2067 child (void * param)
2068 {
2069         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
2070         pthread_attr_t log_attr, misc_attr, uevent_attr;
2071         struct vectors * vecs;
2072         struct multipath * mpp;
2073         int i;
2074 #ifdef USE_SYSTEMD
2075         unsigned long checkint;
2076 #endif
2077         int rc;
2078         int pid_fd = -1;
2079         char *envp;
2080
2081         mlockall(MCL_CURRENT | MCL_FUTURE);
2082         signal_init();
2083
2084         udev = udev_new();
2085
2086         setup_thread_attr(&misc_attr, 64 * 1024, 1);
2087         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 1);
2088         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2089
2090         if (logsink == 1) {
2091                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2092                 log_thread_start(&log_attr);
2093                 pthread_attr_destroy(&log_attr);
2094         }
2095         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2096         if (pid_fd < 0) {
2097                 condlog(1, "failed to create pidfile");
2098                 if (logsink == 1)
2099                         log_thread_stop();
2100                 exit(1);
2101         }
2102
2103         post_config_state(DAEMON_START);
2104
2105         condlog(2, "--------start up--------");
2106         condlog(2, "read " DEFAULT_CONFIGFILE);
2107
2108         if (load_config(DEFAULT_CONFIGFILE, udev))
2109                 goto failed;
2110
2111         uxsock_timeout = conf->uxsock_timeout;
2112
2113         dm_init(conf->verbosity);
2114         dm_drv_version(conf->version, TGT_MPATH);
2115         if (init_checkers()) {
2116                 condlog(0, "failed to initialize checkers");
2117                 goto failed;
2118         }
2119         if (init_prio()) {
2120                 condlog(0, "failed to initialize prioritizers");
2121                 goto failed;
2122         }
2123
2124         setlogmask(LOG_UPTO(conf->verbosity + 3));
2125
2126         envp = getenv("LimitNOFILE");
2127
2128         if (envp) {
2129                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2130         } else if (conf->max_fds) {
2131                 struct rlimit fd_limit;
2132
2133                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2134                         condlog(0, "can't get open fds limit: %s",
2135                                 strerror(errno));
2136                         fd_limit.rlim_cur = 0;
2137                         fd_limit.rlim_max = 0;
2138                 }
2139                 if (fd_limit.rlim_cur < conf->max_fds) {
2140                         fd_limit.rlim_cur = conf->max_fds;
2141                         if (fd_limit.rlim_max < conf->max_fds)
2142                                 fd_limit.rlim_max = conf->max_fds;
2143                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2144                                 condlog(0, "can't set open fds limit to "
2145                                         "%lu/%lu : %s",
2146                                         fd_limit.rlim_cur, fd_limit.rlim_max,
2147                                         strerror(errno));
2148                         } else {
2149                                 condlog(3, "set open fds limit to %lu/%lu",
2150                                         fd_limit.rlim_cur, fd_limit.rlim_max);
2151                         }
2152                 }
2153
2154         }
2155
2156         vecs = gvecs = init_vecs();
2157         if (!vecs)
2158                 goto failed;
2159
2160         setscheduler();
2161         set_oom_adj();
2162
2163         dm_udev_set_sync_support(0);
2164 #ifdef USE_SYSTEMD
2165         envp = getenv("WATCHDOG_USEC");
2166         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2167                 /* Value is in microseconds */
2168                 conf->max_checkint = checkint / 1000000;
2169                 /* Rescale checkint */
2170                 if (conf->checkint > conf->max_checkint)
2171                         conf->checkint = conf->max_checkint;
2172                 else
2173                         conf->checkint = conf->max_checkint / 4;
2174                 condlog(3, "enabling watchdog, interval %d max %d",
2175                         conf->checkint, conf->max_checkint);
2176                 use_watchdog = conf->checkint;
2177         }
2178 #endif
2179         /*
2180          * Signal start of configuration
2181          */
2182         post_config_state(DAEMON_CONFIGURE);
2183
2184         /*
2185          * Start uevent listener early to catch events
2186          */
2187         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2188                 condlog(0, "failed to create uevent thread: %d", rc);
2189                 goto failed;
2190         }
2191         pthread_attr_destroy(&uevent_attr);
2192         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2193                 condlog(0, "failed to create cli listener: %d", rc);
2194                 goto failed;
2195         }
2196
2197         /*
2198          * start threads
2199          */
2200         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2201                 condlog(0,"failed to create checker loop thread: %d", rc);
2202                 goto failed;
2203         }
2204         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2205                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2206                 goto failed;
2207         }
2208         pthread_attr_destroy(&misc_attr);
2209
2210 #ifdef USE_SYSTEMD
2211         sd_notify(0, "READY=1");
2212 #endif
2213
2214         while (running_state != DAEMON_SHUTDOWN) {
2215                 pthread_cleanup_push(config_cleanup, NULL);
2216                 pthread_mutex_lock(&config_lock);
2217                 if (running_state != DAEMON_CONFIGURE &&
2218                     running_state != DAEMON_SHUTDOWN) {
2219                         pthread_cond_wait(&config_cond, &config_lock);
2220                 }
2221                 pthread_cleanup_pop(1);
2222                 if (running_state == DAEMON_CONFIGURE) {
2223                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2224                         lock(vecs->lock);
2225                         pthread_testcancel();
2226                         if (!need_to_delay_reconfig(vecs)) {
2227                                 reconfigure(vecs);
2228                         } else {
2229                                 conf->delayed_reconfig = 1;
2230                         }
2231                         lock_cleanup_pop(vecs->lock);
2232                         post_config_state(DAEMON_IDLE);
2233                 }
2234         }
2235
2236         lock(vecs->lock);
2237         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2238                 vector_foreach_slot(vecs->mpvec, mpp, i)
2239                         dm_queue_if_no_path(mpp->alias, 0);
2240         remove_maps_and_stop_waiters(vecs);
2241         unlock(vecs->lock);
2242
2243         pthread_cancel(check_thr);
2244         pthread_cancel(uevent_thr);
2245         pthread_cancel(uxlsnr_thr);
2246         pthread_cancel(uevq_thr);
2247
2248         lock(vecs->lock);
2249         free_pathvec(vecs->pathvec, FREE_PATHS);
2250         vecs->pathvec = NULL;
2251         unlock(vecs->lock);
2252         /* Now all the waitevent threads will start rushing in. */
2253         while (vecs->lock.depth > 0) {
2254                 sleep (1); /* This is weak. */
2255                 condlog(3, "Have %d wait event checkers threads to de-alloc,"
2256                         " waiting...", vecs->lock.depth);
2257         }
2258         pthread_mutex_destroy(vecs->lock.mutex);
2259         FREE(vecs->lock.mutex);
2260         vecs->lock.depth = 0;
2261         vecs->lock.mutex = NULL;
2262         FREE(vecs);
2263         vecs = NULL;
2264
2265         cleanup_checkers();
2266         cleanup_prio();
2267
2268         dm_lib_release();
2269         dm_lib_exit();
2270
2271         /* We're done here */
2272         condlog(3, "unlink pidfile");
2273         unlink(DEFAULT_PIDFILE);
2274
2275         condlog(2, "--------shut down-------");
2276
2277         if (logsink == 1)
2278                 log_thread_stop();
2279
2280         /*
2281          * Freeing config must be done after condlog() and dm_lib_exit(),
2282          * because logging functions like dlog() and dm_write_log()
2283          * reference the config.
2284          */
2285         free_config(conf);
2286         conf = NULL;
2287         udev_unref(udev);
2288         udev = NULL;
2289 #ifdef _DEBUG_
2290         dbg_free_final(NULL);
2291 #endif
2292
2293 #ifdef USE_SYSTEMD
2294         sd_notify(0, "ERRNO=0");
2295 #endif
2296         exit(0);
2297
2298 failed:
2299 #ifdef USE_SYSTEMD
2300         sd_notify(0, "ERRNO=1");
2301 #endif
2302         if (pid_fd >= 0)
2303                 close(pid_fd);
2304         exit(1);
2305 }
2306
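/*
 * Classic double-fork daemonization: the first fork returns the child's
 * pid to the caller (which then exits), the child calls setsid() and
 * forks again so it can never reacquire a controlling terminal, and
 * stdin/stdout/stderr are redirected to /dev/null.  Returns 0 in the
 * daemon, the child's pid in the original parent, -1 on error.
 */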
2307 static int
2308 daemonize(void)
2309 {
2310         int pid;
2311         int dev_null_fd;
2312
2313         if( (pid = fork()) < 0){
2314                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2315                 return -1;
2316         }
2317         else if (pid != 0)
2318                 return pid;
2319
2320         setsid();
2321
2322         if ( (pid = fork()) < 0)
2323                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2324         else if (pid != 0)
2325                 _exit(0);
2326
2327         if (chdir("/") < 0)
2328                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2329
2330         dev_null_fd = open("/dev/null", O_RDWR);
2331         if (dev_null_fd < 0){
2332                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2333                         strerror(errno));
2334                 _exit(0);
2335         }
2336
2337         close(STDIN_FILENO);
2338         if (dup(dev_null_fd) < 0) {
2339                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2340                         strerror(errno));
2341                 _exit(0);
2342         }
2343         close(STDOUT_FILENO);
2344         if (dup(dev_null_fd) < 0) {
2345                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2346                         strerror(errno));
2347                 _exit(0);
2348         }
2349         close(STDERR_FILENO);
2350         if (dup(dev_null_fd) < 0) {
2351                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2352                         strerror(errno));
2353                 _exit(0);
2354         }
2355         close(dev_null_fd);
2356         daemon_pid = getpid();
2357         return 0;
2358 }
2359
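/*
 * Parse the command line.  -d keeps the daemon in the foreground and -v
 * sets the verbosity; -B and -n set bindings_read_only and
 * ignore_new_devs in the config.  With -k, or with trailing arguments,
 * multipathd acts as a client instead: the command is forwarded to a
 * running daemon through uxclnt() and the process exits.
 */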
2360 int
2361 main (int argc, char *argv[])
2362 {
2363         extern char *optarg;
2364         extern int optind;
2365         int arg;
2366         int err;
2367         int foreground = 0;
2368
2369         logsink = 1;
2370
2371         if (getuid() != 0) {
2372                 fprintf(stderr, "need to be root\n");
2373                 exit(1);
2374         }
2375
2376         /* make sure we don't lock any path */
2377         if (chdir("/") < 0)
2378                 fprintf(stderr, "can't chdir to root directory : %s\n",
2379                         strerror(errno));
2380         umask(umask(077) | 022);
2381
2382         conf = alloc_config();
2383
2384         if (!conf)
2385                 exit(1);
2386
2387         while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) {
2388                 switch (arg) {
2389                 case 'd':
2390                         foreground = 1;
2391                         if (logsink > 0)
2392                                 logsink = 0;
2393                         //debug=1; /* ### comment me out ### */
2394                         break;
2395                 case 'v':
2396                         if (!optarg ||
2397                             !isdigit(optarg[0]))
2398                                 exit(1);
2399
2400                         conf->verbosity = atoi(optarg);
2401                         break;
2402                 case 's':
2403                         logsink = -1;
2404                         break;
2405                 case 'k':
2406                         if (load_config(DEFAULT_CONFIGFILE, udev_new()))
2407                                 exit(1);
2408                         uxclnt(optarg, uxsock_timeout + 100);
2409                         exit(0);
2410                 case 'B':
2411                         conf->bindings_read_only = 1;
2412                         break;
2413                 case 'n':
2414                         conf->ignore_new_devs = 1;
2415                         break;
2416                 default:
2417                         fprintf(stderr, "Invalid argument '-%c'\n",
2418                                 optopt);
2419                         exit(1);
2420                 }
2421         }
2422         if (optind < argc) {
2423                 char cmd[CMDSIZE];
2424                 char * s = cmd;
2425                 char * c = s;
2426
2427                 if (load_config(DEFAULT_CONFIGFILE, udev_new()))
2428                         exit(1);
2429                 memset(cmd, 0x0, CMDSIZE);
2430                 while (optind < argc) {
2431                         if (strchr(argv[optind], ' '))
2432                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
2433                         else
2434                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
2435                         optind++;
2436                 }
2437                 c += snprintf(c, s + CMDSIZE - c, "\n");
2438                 uxclnt(s, uxsock_timeout + 100);
2439                 exit(0);
2440         }
2441
2442         if (foreground) {
2443                 if (!isatty(fileno(stdout)))
2444                         setbuf(stdout, NULL);
2445                 err = 0;
2446                 daemon_pid = getpid();
2447         } else
2448                 err = daemonize();
2449
2450         if (err < 0)
2451                 /* error */
2452                 exit(1);
2453         else if (err > 0)
2454                 /* parent dies */
2455                 exit(0);
2456         else
2457                 /* child lives */
2458                 return (child(NULL));
2459 }
2460
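/*
 * Worker for mpath_pr_event_handle().  Reads the registered keys from
 * the device with a PERSISTENT RESERVE IN (READ KEYS) request, looks
 * for the map's reservation_key in the returned list, and if it is
 * found re-registers that key through this path with a PERSISTENT
 * RESERVE OUT (REGISTER AND IGNORE EXISTING KEY) request, so that the
 * reinstated path carries the same registration as the rest of the map.
 */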
2461 void * mpath_pr_event_handler_fn (void * pathp)
2462 {
2463         struct multipath * mpp;
2464         int i, j, ret, isFound;
2465         struct path * pp = (struct path *)pathp;
2466         unsigned char *keyp;
2467         uint64_t prkey;
2468         struct prout_param_descriptor *param;
2469         struct prin_resp *resp;
2470
2471         mpp = pp->mpp;
2472
2473         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2474         if (!resp){
2475                 condlog(0,"%s Alloc failed for prin response", pp->dev);
2476                 return NULL;
2477         }
2478
2479         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2480         if (ret != MPATH_PR_SUCCESS )
2481         {
2482                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2483                 goto out;
2484         }
2485
2486         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2487                         resp->prin_descriptor.prin_readkeys.additional_length );
2488
2489         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2490         {
2491                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2492                 ret = MPATH_PR_SUCCESS;
2493                 goto out;
2494         }
2495         prkey = 0;
2496         keyp = (unsigned char *)mpp->reservation_key;
2497         for (j = 0; j < 8; ++j) {
2498                 if (j > 0)
2499                         prkey <<= 8;
2500                 prkey |= *keyp;
2501                 ++keyp;
2502         }
2503         condlog(2, "Multipath  reservation_key: 0x%" PRIx64 " ", prkey);
2504
2505         isFound = 0;
2506         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2507         {
2508                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
2509                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2510                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2511                 {
2512                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2513                         isFound = 1;
2514                         break;
2515                 }
2516         }
2517         if (!isFound)
2518         {
2519                 condlog(0, "%s: Either device not registered or ", pp->dev);
2520                 condlog(0, "host is not authorised for registration. Skip path");
2521                 ret = MPATH_PR_OTHER;
2522                 goto out;
2523         }
2524
2525         param = calloc(1, sizeof(struct prout_param_descriptor));
2526         if (!param)
2527                 goto out;
2528         for (j = 7; j >= 0; --j) {
2529                 param->sa_key[j] = (prkey & 0xff);
2530                 prkey >>= 8;
2531         }
2532         param->num_transportid = 0;
2533
2534         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
2535
2536         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2537         if (ret != MPATH_PR_SUCCESS )
2538         {
2539                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2540         }
2541         mpp->prflag = 1;
2542
2543         free(param);
2544 out:
2545         free(resp);
2546         return NULL;
2547 }
2548
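/*
 * Kick off the persistent reservation check for a path that just came
 * back up.  Nothing to do unless the map has a reservation_key
 * configured; otherwise the work is done in a short-lived thread that
 * is joined before returning.
 */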
2549 int mpath_pr_event_handle(struct path *pp)
2550 {
2551         pthread_t thread;
2552         int rc;
2553         pthread_attr_t attr;
2554         struct multipath * mpp;
2555
2556         mpp = pp->mpp;
2557
2558         if (!mpp->reservation_key)
2559                 return -1;
2560
2561         pthread_attr_init(&attr);
2562         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2563
2564         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2565         if (rc) {
2566                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2567                 return -1;
2568         }
2569         pthread_attr_destroy(&attr);
2570         rc = pthread_join(thread, NULL);
2571         return 0;
2572 }
2573