libmultipath: move remove_map waiter code to multipathd
multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <urcu.h>
21 #ifdef USE_SYSTEMD
22 #include <systemd/sd-daemon.h>
23 #endif
24 #include <semaphore.h>
25 #include <time.h>
26 #include <stdbool.h>
27
28 /*
29  * libmultipath
30  */
31 #include "time-util.h"
32
33 /*
34  * libcheckers
35  */
36 #include "checkers.h"
37
38 #ifdef USE_SYSTEMD
39 static int use_watchdog;
40 #endif
41
42 /*
43  * libmultipath
44  */
45 #include "parser.h"
46 #include "vector.h"
47 #include "memory.h"
48 #include "config.h"
49 #include "util.h"
50 #include "hwtable.h"
51 #include "defaults.h"
52 #include "structs.h"
53 #include "blacklist.h"
54 #include "structs_vec.h"
55 #include "dmparser.h"
56 #include "devmapper.h"
57 #include "sysfs.h"
58 #include "dict.h"
59 #include "discovery.h"
60 #include "debug.h"
61 #include "propsel.h"
62 #include "uevent.h"
63 #include "switchgroup.h"
64 #include "print.h"
65 #include "configure.h"
66 #include "prio.h"
67 #include "wwids.h"
68 #include "pgpolicies.h"
69 #include "uevent.h"
70 #include "log.h"
71
72 #include "mpath_cmd.h"
73 #include "mpath_persist.h"
74
75 #include "prioritizers/alua_rtpg.h"
76
77 #include "main.h"
78 #include "pidfile.h"
79 #include "uxlsnr.h"
80 #include "uxclnt.h"
81 #include "cli.h"
82 #include "cli_handlers.h"
83 #include "lock.h"
84 #include "waiter.h"
85 #include "io_err_stat.h"
86 #include "wwids.h"
87 #include "foreign.h"
88 #include "../third-party/valgrind/drd.h"
89
90 #define FILE_NAME_SIZE 256
91 #define CMDSIZE 160
92
93 #define LOG_MSG(a, b) \
94 do { \
95         if (pp->offline) \
96                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
97         else if (strlen(b)) \
98                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
99 } while(0)
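/*
 * Typical use, as in repair_path() below:
 *
 *	LOG_MSG(1, checker_message(&pp->checker));
 *
 * Note that the macro relies on a variable named 'pp' being in scope.
 */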
100
101 struct mpath_event_param
102 {
103         char * devname;
104         struct multipath *mpp;
105 };
106
107 int logsink;
108 int uxsock_timeout;
109 int verbosity;
110 int bindings_read_only;
111 int ignore_new_devs;
112 enum daemon_status running_state = DAEMON_INIT;
113 pid_t daemon_pid;
114 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
115 pthread_cond_t config_cond;
116
117 /*
118  * global copy of vecs for use in sig handlers
119  */
120 struct vectors * gvecs;
121
122 struct udev * udev;
123
124 struct config *multipath_conf;
125
126 /* Local variables */
127 static volatile sig_atomic_t exit_sig;
128 static volatile sig_atomic_t reconfig_sig;
129 static volatile sig_atomic_t log_reset_sig;
130
131 const char *
132 daemon_status(void)
133 {
134         switch (running_state) {
135         case DAEMON_INIT:
136                 return "init";
137         case DAEMON_START:
138                 return "startup";
139         case DAEMON_CONFIGURE:
140                 return "configure";
141         case DAEMON_IDLE:
142                 return "idle";
143         case DAEMON_RUNNING:
144                 return "running";
145         case DAEMON_SHUTDOWN:
146                 return "shutdown";
147         }
148         return NULL;
149 }
150
151 /*
152  * I love you too, systemd ...
153  */
154 const char *
155 sd_notify_status(void)
156 {
157         switch (running_state) {
158         case DAEMON_INIT:
159                 return "STATUS=init";
160         case DAEMON_START:
161                 return "STATUS=startup";
162         case DAEMON_CONFIGURE:
163                 return "STATUS=configure";
164         case DAEMON_IDLE:
165         case DAEMON_RUNNING:
166                 return "STATUS=up";
167         case DAEMON_SHUTDOWN:
168                 return "STATUS=shutdown";
169         }
170         return NULL;
171 }
172
173 #ifdef USE_SYSTEMD
174 static void do_sd_notify(enum daemon_status old_state)
175 {
176         /*
177          * Checkerloop switches back and forth between idle and running state.
178          * No need to tell systemd each time.
179          * These notifications cause a lot of overhead on dbus.
180          */
181         if ((running_state == DAEMON_IDLE || running_state == DAEMON_RUNNING) &&
182             (old_state == DAEMON_IDLE || old_state == DAEMON_RUNNING))
183                 return;
184         sd_notify(0, sd_notify_status());
185 }
186 #endif
187
188 static void config_cleanup(void *arg)
189 {
190         pthread_mutex_unlock(&config_lock);
191 }
192
193 void post_config_state(enum daemon_status state)
194 {
195         pthread_mutex_lock(&config_lock);
196         if (state != running_state) {
197                 enum daemon_status old_state = running_state;
198
199                 running_state = state;
200                 pthread_cond_broadcast(&config_cond);
201 #ifdef USE_SYSTEMD
202                 do_sd_notify(old_state);
203 #endif
204         }
205         pthread_mutex_unlock(&config_lock);
206 }
207
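/*
 * Unlike post_config_state(), which switches state unconditionally,
 * set_config_state() waits (up to one second) for a state change if the
 * daemon is not currently idle.  It returns 0 on success, or a pthread
 * error code such as ETIMEDOUT if the state was not switched.
 */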
208 int set_config_state(enum daemon_status state)
209 {
210         int rc = 0;
211
212         pthread_cleanup_push(config_cleanup, NULL);
213         pthread_mutex_lock(&config_lock);
214         if (running_state != state) {
215                 enum daemon_status old_state = running_state;
216
217                 if (running_state != DAEMON_IDLE) {
218                         struct timespec ts;
219
220                         clock_gettime(CLOCK_MONOTONIC, &ts);
221                         ts.tv_sec += 1;
222                         rc = pthread_cond_timedwait(&config_cond,
223                                                     &config_lock, &ts);
224                 }
225                 if (!rc) {
226                         running_state = state;
227                         pthread_cond_broadcast(&config_cond);
228 #ifdef USE_SYSTEMD
229                         do_sd_notify(old_state);
230 #endif
231                 }
232         }
233         pthread_cleanup_pop(1);
234         return rc;
235 }
236
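/*
 * get_multipath_config() / put_multipath_config() bracket an RCU
 * read-side critical section.  The usual pattern is to copy the needed
 * values and release the config promptly, e.g.:
 *
 *	conf = get_multipath_config();
 *	checkint = conf->checkint;
 *	put_multipath_config(conf);
 */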
237 struct config *get_multipath_config(void)
238 {
239         rcu_read_lock();
240         return rcu_dereference(multipath_conf);
241 }
242
243 void put_multipath_config(struct config *conf)
244 {
245         rcu_read_unlock();
246 }
247
248 static int
249 need_switch_pathgroup (struct multipath * mpp, int refresh)
250 {
251         struct pathgroup * pgp;
252         struct path * pp;
253         unsigned int i, j;
254         struct config *conf;
255         int bestpg;
256
257         if (!mpp)
258                 return 0;
259
260         /*
261          * Refresh path priority values
262          */
263         if (refresh) {
264                 vector_foreach_slot (mpp->pg, pgp, i) {
265                         vector_foreach_slot (pgp->paths, pp, j) {
266                                 conf = get_multipath_config();
267                                 pathinfo(pp, conf, DI_PRIO);
268                                 put_multipath_config(conf);
269                         }
270                 }
271         }
272
273         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
274                 return 0;
275
276         bestpg = select_path_group(mpp);
277         if (mpp->pgfailback == -FAILBACK_MANUAL)
278                 return 0;
279
280         mpp->bestpg = bestpg;
281         if (mpp->bestpg != mpp->nextpg)
282                 return 1;
283
284         return 0;
285 }
286
287 static void
288 switch_pathgroup (struct multipath * mpp)
289 {
290         mpp->stat_switchgroup++;
291         dm_switchgroup(mpp->alias, mpp->bestpg);
292         condlog(2, "%s: switch to path group #%i",
293                  mpp->alias, mpp->bestpg);
294 }
295
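/*
 * Wrappers around remove_map()/remove_maps() that also stop the per-map
 * dm event waiter thread.  The waiter threads are a multipathd-only
 * concept, which is presumably why this code lives here rather than in
 * libmultipath (see the commit subject above).
 */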
296 static void
297 remove_map_and_stop_waiter(struct multipath *mpp, struct vectors *vecs,
298                            int purge_vec)
299 {
300         stop_waiter_thread(mpp, vecs);
301         remove_map(mpp, vecs, purge_vec);
302 }
303
304 static void
305 remove_maps_and_stop_waiters(struct vectors *vecs)
306 {
307         int i;
308         struct multipath * mpp;
309
310         if (!vecs)
311                 return;
312
313         vector_foreach_slot(vecs->mpvec, mpp, i)
314                 stop_waiter_thread(mpp, vecs);
315
316         remove_maps(vecs);
317 }
318
319 static int
320 coalesce_maps(struct vectors *vecs, vector nmpv)
321 {
322         struct multipath * ompp;
323         vector ompv = vecs->mpvec;
324         unsigned int i, reassign_maps;
325         struct config *conf;
326
327         conf = get_multipath_config();
328         reassign_maps = conf->reassign_maps;
329         put_multipath_config(conf);
330         vector_foreach_slot (ompv, ompp, i) {
331                 condlog(3, "%s: coalesce map", ompp->alias);
332                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
333                         /*
334                          * remove all current maps not allowed by the
335                          * current configuration
336                          */
337                         if (dm_flush_map(ompp->alias)) {
338                                 condlog(0, "%s: unable to flush devmap",
339                                         ompp->alias);
340                                 /*
341                                  * may be just because the device is open
342                                  */
343                                 if (setup_multipath(vecs, ompp) != 0) {
344                                         i--;
345                                         continue;
346                                 }
347                                 if (!vector_alloc_slot(nmpv))
348                                         return 1;
349
350                                 vector_set_slot(nmpv, ompp);
351
352                                 vector_del_slot(ompv, i);
353                                 i--;
354                         }
355                         else {
356                                 dm_lib_release();
357                                 condlog(2, "%s devmap removed", ompp->alias);
358                         }
359                 } else if (reassign_maps) {
360                         condlog(3, "%s: Reassign existing device-mapper"
361                                 " devices", ompp->alias);
362                         dm_reassign(ompp->alias);
363                 }
364         }
365         return 0;
366 }
367
368 static void
369 sync_maps_state(vector mpvec)
370 {
371         unsigned int i;
372         struct multipath *mpp;
373
374         vector_foreach_slot (mpvec, mpp, i)
375                 sync_map_state(mpp);
376 }
377
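/*
 * Remove the map from the kernel and, on success, from multipathd's own
 * tables.  Returns 0 on success; a non-zero return either means the
 * flush failed or (with nopaths set) that a deferred remove is now in
 * progress.
 */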
378 static int
379 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
380 {
381         int r;
382
383         if (nopaths)
384                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
385         else
386                 r = dm_flush_map(mpp->alias);
387         /*
388          * the dm_flush_map call above may generate a spurious remove uevent;
389          * we can ignore it since the map is removed from our tables below
390          */
391         if (r) {
392                 /*
393                  * A non-zero r is not necessarily fatal: r == 1 means the
394                  * flush failed, any other value means a deferred remove.
395                  */
396                 if (r == 1)
397                         condlog(0, "%s: can't flush", mpp->alias);
398                 else {
399                         condlog(2, "%s: devmap deferred remove", mpp->alias);
400                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
401                 }
402                 return r;
403         }
404         else {
405                 dm_lib_release();
406                 condlog(2, "%s: map flushed", mpp->alias);
407         }
408
409         orphan_paths(vecs->pathvec, mpp);
410         remove_map_and_stop_waiter(mpp, vecs, 1);
411
412         return 0;
413 }
414
415 static int
416 uev_add_map (struct uevent * uev, struct vectors * vecs)
417 {
418         char *alias;
419         int major = -1, minor = -1, rc;
420
421         condlog(3, "%s: add map (uevent)", uev->kernel);
422         alias = uevent_get_dm_name(uev);
423         if (!alias) {
424                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
425                 major = uevent_get_major(uev);
426                 minor = uevent_get_minor(uev);
427                 alias = dm_mapname(major, minor);
428                 if (!alias) {
429                         condlog(2, "%s: mapname not found for %d:%d",
430                                 uev->kernel, major, minor);
431                         return 1;
432                 }
433         }
434         pthread_cleanup_push(cleanup_lock, &vecs->lock);
435         lock(&vecs->lock);
436         pthread_testcancel();
437         rc = ev_add_map(uev->kernel, alias, vecs);
438         lock_cleanup_pop(vecs->lock);
439         FREE(alias);
440         return rc;
441 }
442
443 /*
444  * ev_add_map expects that the multipath device already exists in kernel
445  * before it is called. It just adds a device to multipathd or updates an
446  * existing device.
447  */
448 int
449 ev_add_map (char * dev, const char * alias, struct vectors * vecs)
450 {
451         struct multipath * mpp;
452         int delayed_reconfig, reassign_maps;
453         struct config *conf;
454
455         if (!dm_is_mpath(alias)) {
456                 condlog(4, "%s: not a multipath map", alias);
457                 return 0;
458         }
459
460         mpp = find_mp_by_alias(vecs->mpvec, alias);
461
462         if (mpp) {
463                 if (mpp->wait_for_udev > 1) {
464                         condlog(2, "%s: performing delayed actions",
465                                 mpp->alias);
466                         if (update_map(mpp, vecs))
467                                 /* setup multipathd removed the map */
468                                 return 1;
469                 }
470                 conf = get_multipath_config();
471                 delayed_reconfig = conf->delayed_reconfig;
472                 reassign_maps = conf->reassign_maps;
473                 put_multipath_config(conf);
474                 if (mpp->wait_for_udev) {
475                         mpp->wait_for_udev = 0;
476                         if (delayed_reconfig &&
477                             !need_to_delay_reconfig(vecs)) {
478                                 condlog(2, "reconfigure (delayed)");
479                                 set_config_state(DAEMON_CONFIGURE);
480                                 return 0;
481                         }
482                 }
483                 /*
484                  * An existing map is not an error -- we generate our own
485                  * uevent when we create a multipath mapped device as a
486                  * result of uev_add_path
487                  */
488                 if (reassign_maps) {
489                         condlog(3, "%s: Reassign existing device-mapper devices",
490                                 alias);
491                         dm_reassign(alias);
492                 }
493                 return 0;
494         }
495         condlog(2, "%s: adding map", alias);
496
497         /*
498          * now we can register the map
499          */
500         if ((mpp = add_map_without_path(vecs, alias))) {
501                 sync_map_state(mpp);
502                 condlog(2, "%s: devmap %s registered", alias, dev);
503                 return 0;
504         } else {
505                 condlog(2, "%s: ev_add_map failed", dev);
506                 return 1;
507         }
508 }
509
510 static int
511 uev_remove_map (struct uevent * uev, struct vectors * vecs)
512 {
513         char *alias;
514         int minor;
515         struct multipath *mpp;
516
517         condlog(2, "%s: remove map (uevent)", uev->kernel);
518         alias = uevent_get_dm_name(uev);
519         if (!alias) {
520                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
521                 return 0;
522         }
523         minor = uevent_get_minor(uev);
524
525         pthread_cleanup_push(cleanup_lock, &vecs->lock);
526         lock(&vecs->lock);
527         pthread_testcancel();
528         mpp = find_mp_by_minor(vecs->mpvec, minor);
529
530         if (!mpp) {
531                 condlog(2, "%s: devmap not registered, can't remove",
532                         uev->kernel);
533                 goto out;
534         }
535         if (strcmp(mpp->alias, alias)) {
536                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
537                         mpp->alias, mpp->dmi->minor, minor);
538                 goto out;
539         }
540
541         orphan_paths(vecs->pathvec, mpp);
542         remove_map_and_stop_waiter(mpp, vecs, 1);
543 out:
544         lock_cleanup_pop(vecs->lock);
545         FREE(alias);
546         return 0;
547 }
548
549 /* Called from CLI handler */
550 int
551 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
552 {
553         struct multipath * mpp;
554
555         mpp = find_mp_by_minor(vecs->mpvec, minor);
556
557         if (!mpp) {
558                 condlog(2, "%s: devmap not registered, can't remove",
559                         devname);
560                 return 1;
561         }
562         if (strcmp(mpp->alias, alias)) {
563                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
564                         mpp->alias, mpp->dmi->minor, minor);
565                 return 1;
566         }
567         return flush_map(mpp, vecs, 0);
568 }
569
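/*
 * Handle a block device "add" uevent.  A path that is already known is
 * only reinitialized if it has neither a map nor a wwid; otherwise a new
 * struct path is allocated, stored in pathvec and passed to
 * ev_add_path().  need_do_map is forwarded to ev_add_path() and controls
 * whether the map is pushed to the kernel immediately.
 */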
570 static int
571 uev_add_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
572 {
573         struct path *pp;
574         int ret = 0, i;
575         struct config *conf;
576
577         condlog(2, "%s: add path (uevent)", uev->kernel);
578         if (strstr(uev->kernel, "..") != NULL) {
579                 /*
580                  * Don't allow relative device names in the pathvec
581                  */
582                 condlog(0, "%s: path name is invalid", uev->kernel);
583                 return 1;
584         }
585
586         pthread_cleanup_push(cleanup_lock, &vecs->lock);
587         lock(&vecs->lock);
588         pthread_testcancel();
589         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
590         if (pp) {
591                 int r;
592
593                 condlog(2, "%s: spurious uevent, path already in pathvec",
594                         uev->kernel);
595                 if (!pp->mpp && !strlen(pp->wwid)) {
596                         condlog(3, "%s: reinitialize path", uev->kernel);
597                         udev_device_unref(pp->udev);
598                         pp->udev = udev_device_ref(uev->udev);
599                         conf = get_multipath_config();
600                         r = pathinfo(pp, conf,
601                                      DI_ALL | DI_BLACKLIST);
602                         put_multipath_config(conf);
603                         if (r == PATHINFO_OK)
604                                 ret = ev_add_path(pp, vecs, need_do_map);
605                         else if (r == PATHINFO_SKIPPED) {
606                                 condlog(3, "%s: remove blacklisted path",
607                                         uev->kernel);
608                                 i = find_slot(vecs->pathvec, (void *)pp);
609                                 if (i != -1)
610                                         vector_del_slot(vecs->pathvec, i);
611                                 free_path(pp);
612                         } else {
613                                 condlog(0, "%s: failed to reinitialize path",
614                                         uev->kernel);
615                                 ret = 1;
616                         }
617                 }
618         }
619         lock_cleanup_pop(vecs->lock);
620         if (pp)
621                 return ret;
622
623         /*
624          * get path vital state
625          */
626         conf = get_multipath_config();
627         ret = alloc_path_with_pathinfo(conf, uev->udev,
628                                        uev->wwid, DI_ALL, &pp);
629         put_multipath_config(conf);
630         if (!pp) {
631                 if (ret == PATHINFO_SKIPPED)
632                         return 0;
633                 condlog(3, "%s: failed to get path info", uev->kernel);
634                 return 1;
635         }
636         pthread_cleanup_push(cleanup_lock, &vecs->lock);
637         lock(&vecs->lock);
638         pthread_testcancel();
639         ret = store_path(vecs->pathvec, pp);
640         if (!ret) {
641                 conf = get_multipath_config();
642                 pp->checkint = conf->checkint;
643                 put_multipath_config(conf);
644                 ret = ev_add_path(pp, vecs, need_do_map);
645         } else {
646                 condlog(0, "%s: failed to store path info, "
647                         "dropping event",
648                         uev->kernel);
649                 free_path(pp);
650                 ret = 1;
651         }
652         lock_cleanup_pop(vecs->lock);
653         return ret;
654 }
655
656 /*
657  * returns:
658  * 0: added
659  * 1: error
660  */
661 int
662 ev_add_path (struct path * pp, struct vectors * vecs, int need_do_map)
663 {
664         struct multipath * mpp;
665         char params[PARAMS_SIZE] = {0};
666         int retries = 3;
667         int start_waiter = 0;
668         int ret;
669
670         /*
671          * need path UID to go any further
672          */
673         if (strlen(pp->wwid) == 0) {
674                 condlog(0, "%s: failed to get path uid", pp->dev);
675                 goto fail; /* leave path added to pathvec */
676         }
677         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
678         if (mpp && mpp->wait_for_udev &&
679             (pathcount(mpp, PATH_UP) > 0 ||
680              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT &&
681               mpp->ghost_delay_tick <= 0))) {
682                 /* if wait_for_udev is set and valid paths exist */
683                 condlog(2, "%s: delaying path addition until %s is fully initialized", pp->dev, mpp->alias);
684                 mpp->wait_for_udev = 2;
685                 orphan_path(pp, "waiting for create to complete");
686                 return 0;
687         }
688
689         pp->mpp = mpp;
690 rescan:
691         if (mpp) {
692                 if (pp->size && mpp->size != pp->size) {
693                         condlog(0, "%s: failed to add new path %s, "
694                                 "device size mismatch",
695                                 mpp->alias, pp->dev);
696                         int i = find_slot(vecs->pathvec, (void *)pp);
697                         if (i != -1)
698                                 vector_del_slot(vecs->pathvec, i);
699                         free_path(pp);
700                         return 1;
701                 }
702
703                 condlog(4,"%s: adopting all paths for path %s",
704                         mpp->alias, pp->dev);
705                 if (adopt_paths(vecs->pathvec, mpp))
706                         goto fail; /* leave path added to pathvec */
707
708                 verify_paths(mpp, vecs);
709                 mpp->action = ACT_RELOAD;
710                 extract_hwe_from_path(mpp);
711         } else {
712                 if (!should_multipath(pp, vecs->pathvec)) {
713                         orphan_path(pp, "only one path");
714                         return 0;
715                 }
716                 condlog(4,"%s: creating new map", pp->dev);
717                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
718                         mpp->action = ACT_CREATE;
719                         /*
720                          * We don't depend on ACT_CREATE, as domap will
721                          * set it to ACT_NOTHING when complete.
722                          */
723                         start_waiter = 1;
724                 }
725                 if (!start_waiter)
726                         goto fail; /* leave path added to pathvec */
727         }
728
729         /* persistent reservation check */
730         mpath_pr_event_handle(pp);
731
732         if (!need_do_map)
733                 return 0;
734
735         if (!dm_map_present(mpp->alias)) {
736                 mpp->action = ACT_CREATE;
737                 start_waiter = 1;
738         }
739         /*
740          * push the map to the device-mapper
741          */
742         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
743                 condlog(0, "%s: failed to setup map for addition of new "
744                         "path %s", mpp->alias, pp->dev);
745                 goto fail_map;
746         }
747         /*
748          * reload the map for the multipath mapped device
749          */
750 retry:
751         ret = domap(mpp, params, 1);
752         if (ret <= 0) {
753                 if (ret < 0 && retries-- > 0) {
754                         condlog(0, "%s: retry domap for addition of new "
755                                 "path %s", mpp->alias, pp->dev);
756                         sleep(1);
757                         goto retry;
758                 }
759                 condlog(0, "%s: failed in domap for addition of new "
760                         "path %s", mpp->alias, pp->dev);
761                 /*
762                  * deal with asynchronous uevents :((
763                  */
764                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
765                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
766                         sleep(1);
767                         update_mpp_paths(mpp, vecs->pathvec);
768                         goto rescan;
769                 }
770                 else if (mpp->action == ACT_RELOAD)
771                         condlog(0, "%s: giving up reload", mpp->alias);
772                 else
773                         goto fail_map;
774         }
775         dm_lib_release();
776
777         /*
778          * update our state from kernel regardless of create or reload
779          */
780         if (setup_multipath(vecs, mpp))
781                 goto fail; /* if setup_multipath fails, it removes the map */
782
783         sync_map_state(mpp);
784
785         if ((mpp->action == ACT_CREATE ||
786              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
787             start_waiter_thread(mpp, vecs))
788                         goto fail_map;
789
790         if (retries >= 0) {
791                 condlog(2, "%s [%s]: path added to devmap %s",
792                         pp->dev, pp->dev_t, mpp->alias);
793                 return 0;
794         } else
795                 goto fail;
796
797 fail_map:
798         remove_map(mpp, vecs, 1);
799 fail:
800         orphan_path(pp, "failed to add path");
801         return 1;
802 }
803
804 static int
805 uev_remove_path (struct uevent *uev, struct vectors * vecs, int need_do_map)
806 {
807         struct path *pp;
808         int ret;
809
810         condlog(2, "%s: remove path (uevent)", uev->kernel);
811         delete_foreign(uev->udev);
812
813         pthread_cleanup_push(cleanup_lock, &vecs->lock);
814         lock(&vecs->lock);
815         pthread_testcancel();
816         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
817         if (pp)
818                 ret = ev_remove_path(pp, vecs, need_do_map);
819         lock_cleanup_pop(vecs->lock);
820         if (!pp) {
821                 /* Not an error; path might have been purged earlier */
822                 condlog(0, "%s: path already removed", uev->kernel);
823                 return 0;
824         }
825         return ret;
826 }
827
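/*
 * Remove a path from its map, or flush the whole map if this was the
 * last path.  On success the path is also dropped from pathvec and
 * freed; if the map cannot be updated (update_mpp_paths/setup_map
 * failure), the map and its waiter thread are torn down instead.
 */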
828 int
829 ev_remove_path (struct path *pp, struct vectors * vecs, int need_do_map)
830 {
831         struct multipath * mpp;
832         int i, retval = 0;
833         char params[PARAMS_SIZE] = {0};
834
835         /*
836          * avoid referring to the map of an orphaned path
837          */
838         if ((mpp = pp->mpp)) {
839                 /*
840                  * transform the mp->pg vector of vectors of paths
841                  * into a mp->params string to feed the device-mapper
842                  */
843                 if (update_mpp_paths(mpp, vecs->pathvec)) {
844                         condlog(0, "%s: failed to update paths",
845                                 mpp->alias);
846                         goto fail;
847                 }
848                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
849                         vector_del_slot(mpp->paths, i);
850
851                 /*
852                  * remove the map IF removing the last path
853                  */
854                 if (VECTOR_SIZE(mpp->paths) == 0) {
855                         char alias[WWID_SIZE];
856
857                         /*
858                          * flush_map will fail if the device is open
859                          */
860                         strncpy(alias, mpp->alias, WWID_SIZE);
861                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
862                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
863                                 mpp->retry_tick = 0;
864                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
865                                 mpp->disable_queueing = 1;
866                                 mpp->stat_map_failures++;
867                                 dm_queue_if_no_path(mpp->alias, 0);
868                         }
869                         if (!flush_map(mpp, vecs, 1)) {
870                                 condlog(2, "%s: removed map after"
871                                         " removing all paths",
872                                         alias);
873                                 retval = 0;
874                                 goto out;
875                         }
876                         /*
877                          * Not an error, continue
878                          */
879                 }
880
881                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
882                         condlog(0, "%s: failed to setup map for"
883                                 " removal of path %s", mpp->alias, pp->dev);
884                         goto fail;
885                 }
886
887                 if (mpp->wait_for_udev) {
888                         mpp->wait_for_udev = 2;
889                         goto out;
890                 }
891
892                 if (!need_do_map)
893                         goto out;
894                 /*
895                  * reload the map
896                  */
897                 mpp->action = ACT_RELOAD;
898                 if (domap(mpp, params, 1) <= 0) {
899                         condlog(0, "%s: failed in domap for "
900                                 "removal of path %s",
901                                 mpp->alias, pp->dev);
902                         retval = 1;
903                 } else {
904                         /*
905                          * update our state from kernel
906                          */
907                         if (setup_multipath(vecs, mpp))
908                                 return 1;
909                         sync_map_state(mpp);
910
911                         condlog(2, "%s [%s]: path removed from map %s",
912                                 pp->dev, pp->dev_t, mpp->alias);
913                 }
914         }
915
916 out:
917         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
918                 vector_del_slot(vecs->pathvec, i);
919
920         free_path(pp);
921
922         return retval;
923
924 fail:
925         remove_map_and_stop_waiter(mpp, vecs, 1);
926         return 1;
927 }
928
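/*
 * Handle a "change" uevent for a path device: detect wwid changes
 * (optionally failing the path when disable_changed_wwids is set),
 * refresh sysfs information, and reload the map if the read-only state
 * changed.  Paths waiting for a udev retrigger are re-added via
 * uev_add_path().
 */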
929 static int
930 uev_update_path (struct uevent *uev, struct vectors * vecs)
931 {
932         int ro, retval = 0, rc;
933         struct path * pp;
934         struct config *conf;
935         int disable_changed_wwids;
936         int needs_reinit = 0;
937
938         switch ((rc = change_foreign(uev->udev))) {
939         case FOREIGN_OK:
940                 /* known foreign path, ignore event */
941                 return 0;
942         case FOREIGN_IGNORED:
943                 break;
944         case FOREIGN_ERR:
945                 condlog(3, "%s: error in change_foreign", __func__);
946                 break;
947         default:
948                 condlog(1, "%s: return code %d of change_foreign is unsupported",
949                         __func__, rc);
950                 break;
951         }
952
953         conf = get_multipath_config();
954         disable_changed_wwids = conf->disable_changed_wwids;
955         put_multipath_config(conf);
956
957         ro = uevent_get_disk_ro(uev);
958
959         pthread_cleanup_push(cleanup_lock, &vecs->lock);
960         lock(&vecs->lock);
961         pthread_testcancel();
962
963         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
964         if (pp) {
965                 struct multipath *mpp = pp->mpp;
966                 char wwid[WWID_SIZE];
967
968                 strcpy(wwid, pp->wwid);
969                 get_uid(pp, pp->state, uev->udev);
970
971                 if (strncmp(wwid, pp->wwid, WWID_SIZE) != 0) {
972                         condlog(0, "%s: path wwid changed from '%s' to '%s'. %s",
973                                 uev->kernel, wwid, pp->wwid,
974                                 (disable_changed_wwids ? "disallowing" :
975                                  "continuing"));
976                         if (disable_changed_wwids &&
977                             (strlen(wwid) || pp->wwid_changed)) {
978                                 strcpy(pp->wwid, wwid);
979                                 if (!pp->wwid_changed) {
980                                         pp->wwid_changed = 1;
981                                         pp->tick = 1;
982                                         if (pp->mpp)
983                                                 dm_fail_path(pp->mpp->alias, pp->dev_t);
984                                 }
985                                 goto out;
986                         } else if (!disable_changed_wwids)
987                                 strcpy(pp->wwid, wwid);
988                         else
989                                 pp->wwid_changed = 0;
990                 } else {
991                         udev_device_unref(pp->udev);
992                         pp->udev = udev_device_ref(uev->udev);
993                         conf = get_multipath_config();
994                         if (pathinfo(pp, conf, DI_SYSFS|DI_NOIO) != PATHINFO_OK)
995                                 condlog(1, "%s: pathinfo failed after change uevent",
996                                         uev->kernel);
997                         put_multipath_config(conf);
998                 }
999
1000                 if (pp->initialized == INIT_REQUESTED_UDEV)
1001                         needs_reinit = 1;
1002                 else if (mpp && ro >= 0) {
1003                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1004
1005                         if (mpp->wait_for_udev)
1006                                 mpp->wait_for_udev = 2;
1007                         else {
1008                                 if (ro == 1)
1009                                         pp->mpp->force_readonly = 1;
1010                                 retval = reload_map(vecs, mpp, 0, 1);
1011                                 pp->mpp->force_readonly = 0;
1012                                 condlog(2, "%s: map %s reloaded (retval %d)",
1013                                         uev->kernel, mpp->alias, retval);
1014                         }
1015                 }
1016         }
1017 out:
1018         lock_cleanup_pop(vecs->lock);
1019         if (!pp) {
1020                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1021                 if (uev->udev) {
1022                         int flag = DI_SYSFS | DI_WWID;
1023
1024                         conf = get_multipath_config();
1025                         retval = alloc_path_with_pathinfo(conf, uev->udev, uev->wwid, flag, NULL);
1026                         put_multipath_config(conf);
1027
1028                         if (retval == PATHINFO_SKIPPED) {
1029                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1030                                 return 0;
1031                         }
1032                 }
1033
1034                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1035         }
1036         if (needs_reinit)
1037                 retval = uev_add_path(uev, vecs, 1);
1038         return retval;
1039 }
1040
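/*
 * For a dm uevent with DM_ACTION=PATH_FAILED, look up the failed path by
 * its DM_PATH value and hand it to the io_err_stat code, which does the
 * path IO error accounting described in uev_trigger() below.
 */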
1041 static int
1042 uev_pathfail_check(struct uevent *uev, struct vectors *vecs)
1043 {
1044         char *action = NULL, *devt = NULL;
1045         struct path *pp;
1046         int r = 1;
1047
1048         action = uevent_get_dm_action(uev);
1049         if (!action)
1050                 return 1;
1051         if (strncmp(action, "PATH_FAILED", 11))
1052                 goto out;
1053         devt = uevent_get_dm_path(uev);
1054         if (!devt) {
1055                 condlog(3, "%s: No DM_PATH in uevent", uev->kernel);
1056                 goto out;
1057         }
1058
1059         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1060         lock(&vecs->lock);
1061         pthread_testcancel();
1062         pp = find_path_by_devt(vecs->pathvec, devt);
1063         if (!pp)
1064                 goto out_lock;
1065         r = io_err_stat_handle_pathfail(pp);
1066         if (r)
1067                 condlog(3, "io_err_stat: %s: cannot handle pathfail uevent",
1068                                 pp->dev);
1069 out_lock:
1070         lock_cleanup_pop(vecs->lock);
1071         FREE(devt);
1072         FREE(action);
1073         return r;
1074 out:
1075         FREE(action);
1076         return 1;
1077 }
1078
1079 static int
1080 map_discovery (struct vectors * vecs)
1081 {
1082         struct multipath * mpp;
1083         unsigned int i;
1084
1085         if (dm_get_maps(vecs->mpvec))
1086                 return 1;
1087
1088         vector_foreach_slot (vecs->mpvec, mpp, i)
1089                 if (update_multipath_table(mpp, vecs->pathvec, 1) ||
1090                     update_multipath_status(mpp)) {
1091                         remove_map(mpp, vecs, 1);
1092                         i--;
1093                 }
1094
1095         return 0;
1096 }
1097
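/*
 * Callback run by the unix socket listener for each CLI request.
 * Non-root clients may only issue "list"/"show" commands.  A positive
 * parse_cmd() result is reported as "timeout" or "fail"; a successful
 * command with no output is answered with "ok".
 */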
1098 int
1099 uxsock_trigger (char * str, char ** reply, int * len, bool is_root,
1100                 void * trigger_data)
1101 {
1102         struct vectors * vecs;
1103         int r;
1104
1105         *reply = NULL;
1106         *len = 0;
1107         vecs = (struct vectors *)trigger_data;
1108
1109         if ((str != NULL) && (is_root == false) &&
1110             (strncmp(str, "list", strlen("list")) != 0) &&
1111             (strncmp(str, "show", strlen("show")) != 0)) {
1112                 *reply = STRDUP("permission denied: need to be root");
1113                 if (*reply)
1114                         *len = strlen(*reply) + 1;
1115                 return 1;
1116         }
1117
1118         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1119
1120         if (r > 0) {
1121                 if (r == ETIMEDOUT)
1122                         *reply = STRDUP("timeout\n");
1123                 else
1124                         *reply = STRDUP("fail\n");
1125                 if (*reply)
1126                         *len = strlen(*reply) + 1;
1127                 r = 1;
1128         }
1129         else if (!r && *len == 0) {
1130                 *reply = STRDUP("ok\n");
1131                 if (*reply)
1132                         *len = strlen(*reply) + 1;
1133                 r = 0;
1134         }
1135         /* else if (r < 0) leave *reply alone */
1136
1137         return r;
1138 }
1139
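/*
 * Main uevent dispatcher.  It waits until the daemon is idle or running,
 * treats dm-* events as map events and everything else as path events.
 * Merged path events are processed with need_do_map = 0, so only the
 * final event of a batch triggers the actual map reload.
 */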
1140 int
1141 uev_trigger (struct uevent * uev, void * trigger_data)
1142 {
1143         int r = 0;
1144         struct vectors * vecs;
1145         struct uevent *merge_uev, *tmp;
1146
1147         vecs = (struct vectors *)trigger_data;
1148
1149         pthread_cleanup_push(config_cleanup, NULL);
1150         pthread_mutex_lock(&config_lock);
1151         if (running_state != DAEMON_IDLE &&
1152             running_state != DAEMON_RUNNING)
1153                 pthread_cond_wait(&config_cond, &config_lock);
1154         pthread_cleanup_pop(1);
1155
1156         if (running_state == DAEMON_SHUTDOWN)
1157                 return 0;
1158
1159         /*
1160          * device map event
1161          * Add events are ignored here as the tables
1162          * are not fully initialised then.
1163          */
1164         if (!strncmp(uev->kernel, "dm-", 3)) {
1165                 if (!uevent_is_mpath(uev)) {
1166                         if (!strncmp(uev->action, "change", 6))
1167                                 (void)add_foreign(uev->udev);
1168                         else if (!strncmp(uev->action, "remove", 6))
1169                                 (void)delete_foreign(uev->udev);
1170                         goto out;
1171                 }
1172                 if (!strncmp(uev->action, "change", 6)) {
1173                         r = uev_add_map(uev, vecs);
1174
1175                         /*
1176                          * the kernel-side dm-mpath issues a PATH_FAILED event
1177                          * when it encounters a path IO error, which makes this
1178                          * a reasonable entry point for the path IO error
1179                          * accounting process.
1180                          */
1181                         uev_pathfail_check(uev, vecs);
1182                 } else if (!strncmp(uev->action, "remove", 6)) {
1183                         r = uev_remove_map(uev, vecs);
1184                 }
1185                 goto out;
1186         }
1187
1188         /*
1189          * path add/remove/change event, add/remove maybe merged
1190          */
1191         list_for_each_entry_safe(merge_uev, tmp, &uev->merge_node, node) {
1192                 if (!strncmp(merge_uev->action, "add", 3))
1193                         r += uev_add_path(merge_uev, vecs, 0);
1194                 if (!strncmp(merge_uev->action, "remove", 6))
1195                         r += uev_remove_path(merge_uev, vecs, 0);
1196         }
1197
1198         if (!strncmp(uev->action, "add", 3))
1199                 r += uev_add_path(uev, vecs, 1);
1200         if (!strncmp(uev->action, "remove", 6))
1201                 r += uev_remove_path(uev, vecs, 1);
1202         if (!strncmp(uev->action, "change", 6))
1203                 r += uev_update_path(uev, vecs);
1204
1205 out:
1206         return r;
1207 }
1208
1209 static void rcu_unregister(void *param)
1210 {
1211         rcu_unregister_thread();
1212 }
1213
1214 static void *
1215 ueventloop (void * ap)
1216 {
1217         struct udev *udev = ap;
1218
1219         pthread_cleanup_push(rcu_unregister, NULL);
1220         rcu_register_thread();
1221         if (uevent_listen(udev))
1222                 condlog(0, "error starting uevent listener");
1223         pthread_cleanup_pop(1);
1224         return NULL;
1225 }
1226
1227 static void *
1228 uevqloop (void * ap)
1229 {
1230         pthread_cleanup_push(rcu_unregister, NULL);
1231         rcu_register_thread();
1232         if (uevent_dispatch(&uev_trigger, ap))
1233                 condlog(0, "error starting uevent dispatcher");
1234         pthread_cleanup_pop(1);
1235         return NULL;
1236 }
1237 static void *
1238 uxlsnrloop (void * ap)
1239 {
1240         if (cli_init()) {
1241                 condlog(1, "Failed to init uxsock listener");
1242                 return NULL;
1243         }
1244         pthread_cleanup_push(rcu_unregister, NULL);
1245         rcu_register_thread();
1246         set_handler_callback(LIST+PATHS, cli_list_paths);
1247         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1248         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1249         set_handler_callback(LIST+PATH, cli_list_path);
1250         set_handler_callback(LIST+MAPS, cli_list_maps);
1251         set_handler_callback(LIST+STATUS, cli_list_status);
1252         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1253         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1254         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1255         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1256         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1257         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1258         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1259         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1260         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1261         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1262         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1263         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1264         set_handler_callback(LIST+CONFIG, cli_list_config);
1265         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1266         set_handler_callback(LIST+DEVICES, cli_list_devices);
1267         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1268         set_handler_callback(RESET+MAPS+STATS, cli_reset_maps_stats);
1269         set_handler_callback(RESET+MAP+STATS, cli_reset_map_stats);
1270         set_handler_callback(ADD+PATH, cli_add_path);
1271         set_handler_callback(DEL+PATH, cli_del_path);
1272         set_handler_callback(ADD+MAP, cli_add_map);
1273         set_handler_callback(DEL+MAP, cli_del_map);
1274         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1275         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1276         set_handler_callback(SUSPEND+MAP, cli_suspend);
1277         set_handler_callback(RESUME+MAP, cli_resume);
1278         set_handler_callback(RESIZE+MAP, cli_resize);
1279         set_handler_callback(RELOAD+MAP, cli_reload);
1280         set_handler_callback(RESET+MAP, cli_reassign);
1281         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1282         set_handler_callback(FAIL+PATH, cli_fail);
1283         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1284         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1285         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1286         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1287         set_unlocked_handler_callback(QUIT, cli_quit);
1288         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1289         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1290         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1291         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1292         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1293         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1294         set_handler_callback(GETPRKEY+MAP, cli_getprkey);
1295         set_handler_callback(SETPRKEY+MAP+KEY, cli_setprkey);
1296         set_handler_callback(UNSETPRKEY+MAP, cli_unsetprkey);
1297
1298         umask(077);
1299         uxsock_listen(&uxsock_trigger, ap);
1300         pthread_cleanup_pop(1);
1301         return NULL;
1302 }
1303
1304 void
1305 exit_daemon (void)
1306 {
1307         post_config_state(DAEMON_SHUTDOWN);
1308 }
1309
1310 static void
1311 fail_path (struct path * pp, int del_active)
1312 {
1313         if (!pp->mpp)
1314                 return;
1315
1316         condlog(2, "checker failed path %s in map %s",
1317                  pp->dev_t, pp->mpp->alias);
1318
1319         dm_fail_path(pp->mpp->alias, pp->dev_t);
1320         if (del_active)
1321                 update_queue_mode_del_path(pp->mpp);
1322 }
1323
1324 /*
1325  * caller must have locked the path list before calling that function
1326  */
1327 static int
1328 reinstate_path (struct path * pp, int add_active)
1329 {
1330         int ret = 0;
1331
1332         if (!pp->mpp)
1333                 return 0;
1334
1335         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1336                 condlog(0, "%s: reinstate failed", pp->dev_t);
1337                 ret = 1;
1338         } else {
1339                 condlog(2, "%s: reinstated", pp->dev_t);
1340                 if (add_active)
1341                         update_queue_mode_add_path(pp->mpp);
1342         }
1343         return ret;
1344 }
1345
1346 static void
1347 enable_group(struct path * pp)
1348 {
1349         struct pathgroup * pgp;
1350
1351         /*
1352          * If a path is added through uev_add_path, pgindex can be unset.
1353          * The next update_strings() will set it upon a map reload event.
1354          *
1355          * We can safely return here, because upon map reload all
1356          * PGs will be enabled.
1357          */
1358         if (!pp->mpp->pg || !pp->pgindex)
1359                 return;
1360
1361         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1362
1363         if (pgp->status == PGSTATE_DISABLED) {
1364                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1365                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1366         }
1367 }
1368
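/*
 * Drop maps whose device-mapper device has disappeared underneath us,
 * stopping their waiter threads in the process.
 */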
1369 static void
1370 mpvec_garbage_collector (struct vectors * vecs)
1371 {
1372         struct multipath * mpp;
1373         unsigned int i;
1374
1375         if (!vecs->mpvec)
1376                 return;
1377
1378         vector_foreach_slot (vecs->mpvec, mpp, i) {
1379                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1380                         condlog(2, "%s: remove dead map", mpp->alias);
1381                         remove_map_and_stop_waiter(mpp, vecs, 1);
1382                         i--;
1383                 }
1384         }
1385 }
1386
1387 /* This is called after a path has started working again. If the multipath
1388  * device for this path uses the followover failback type, and this is the
1389  * best pathgroup, and this is the first path in the pathgroup to come back
1390  * up, then switch to this pathgroup */
1391 static int
1392 followover_should_failback(struct path * pp)
1393 {
1394         struct pathgroup * pgp;
1395         struct path *pp1;
1396         int i;
1397
1398         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1399             !pp->mpp->pg || !pp->pgindex ||
1400             pp->pgindex != pp->mpp->bestpg)
1401                 return 0;
1402
1403         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1404         vector_foreach_slot(pgp->paths, pp1, i) {
1405                 if (pp1 == pp)
1406                         continue;
1407                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1408                         return 0;
1409         }
1410         return 1;
1411 }
1412
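/*
 * Count down uev_wait_tick for maps that are still waiting for their
 * creation uevent; once the wait times out, re-enable map reloads and,
 * if delayed_reconfig is set, trigger the postponed reconfigure.
 */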
1413 static void
1414 missing_uev_wait_tick(struct vectors *vecs)
1415 {
1416         struct multipath * mpp;
1417         unsigned int i;
1418         int timed_out = 0, delayed_reconfig;
1419         struct config *conf;
1420
1421         vector_foreach_slot (vecs->mpvec, mpp, i) {
1422                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1423                         timed_out = 1;
1424                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1425                         if (mpp->wait_for_udev > 1 && update_map(mpp, vecs)) {
1426                                 /* update_map removed map */
1427                                 i--;
1428                                 continue;
1429                         }
1430                         mpp->wait_for_udev = 0;
1431                 }
1432         }
1433
1434         conf = get_multipath_config();
1435         delayed_reconfig = conf->delayed_reconfig;
1436         put_multipath_config(conf);
1437         if (timed_out && delayed_reconfig &&
1438             !need_to_delay_reconfig(vecs)) {
1439                 condlog(2, "reconfigure (delayed)");
1440                 set_config_state(DAEMON_CONFIGURE);
1441         }
1442 }
1443
1444 static void
1445 ghost_delay_tick(struct vectors *vecs)
1446 {
1447         struct multipath * mpp;
1448         unsigned int i;
1449
1450         vector_foreach_slot (vecs->mpvec, mpp, i) {
1451                 if (mpp->ghost_delay_tick <= 0)
1452                         continue;
1453                 if (--mpp->ghost_delay_tick <= 0) {
1454                         condlog(0, "%s: timed out waiting for active path",
1455                                 mpp->alias);
1456                         mpp->force_udev_reload = 1;
1457                         if (update_map(mpp, vecs) != 0) {
1458                                 /* update_map removed map */
1459                                 i--;
1460                                 continue;
1461                         }
1462                 }
1463         }
1464 }
1465
1466 static void
1467 defered_failback_tick (vector mpvec)
1468 {
1469         struct multipath * mpp;
1470         unsigned int i;
1471
1472         vector_foreach_slot (mpvec, mpp, i) {
1473                 /*
1474                  * count down the deferred failback timer
1475                  */
1476                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1477                         mpp->failback_tick--;
1478
1479                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1480                                 switch_pathgroup(mpp);
1481                 }
1482         }
1483 }
1484
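/*
 * Count down the no_path_retry timer of queueing maps and disable
 * queueing once it expires.
 */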
1485 static void
1486 retry_count_tick(vector mpvec)
1487 {
1488         struct multipath *mpp;
1489         unsigned int i;
1490
1491         vector_foreach_slot (mpvec, mpp, i) {
1492                 if (mpp->retry_tick > 0) {
1493                         mpp->stat_total_queueing_time++;
1494                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1495                         if(--mpp->retry_tick == 0) {
1496                                 mpp->stat_map_failures++;
1497                                 dm_queue_if_no_path(mpp->alias, 0);
1498                                 condlog(2, "%s: Disable queueing", mpp->alias);
1499                         }
1500                 }
1501         }
1502 }
1503
1504 int update_prio(struct path *pp, int refresh_all)
1505 {
1506         int oldpriority;
1507         struct path *pp1;
1508         struct pathgroup * pgp;
1509         int i, j, changed = 0;
1510         struct config *conf;
1511
1512         if (refresh_all) {
1513                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1514                         vector_foreach_slot (pgp->paths, pp1, j) {
1515                                 oldpriority = pp1->priority;
1516                                 conf = get_multipath_config();
1517                                 pathinfo(pp1, conf, DI_PRIO);
1518                                 put_multipath_config(conf);
1519                                 if (pp1->priority != oldpriority)
1520                                         changed = 1;
1521                         }
1522                 }
1523                 return changed;
1524         }
1525         oldpriority = pp->priority;
1526         conf = get_multipath_config();
1527         if (pp->state != PATH_DOWN)
1528                 pathinfo(pp, conf, DI_PRIO);
1529         put_multipath_config(conf);
1530
1531         if (pp->priority == oldpriority)
1532                 return 0;
1533         return 1;
1534 }
1535
1536 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1537 {
1538         if (reload_map(vecs, mpp, refresh, 1))
1539                 return 1;
1540
1541         dm_lib_release();
1542         if (setup_multipath(vecs, mpp) != 0)
1543                 return 1;
1544         sync_map_state(mpp);
1545
1546         return 0;
1547 }
1548
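/*
 * Give the checker a chance to repair a path that is down, and report
 * the result through LOG_MSG (which also notes offline paths).
 */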
1549 void repair_path(struct path * pp)
1550 {
1551         if (pp->state != PATH_DOWN)
1552                 return;
1553
1554         checker_repair(&pp->checker);
1555         LOG_MSG(1, checker_message(&pp->checker));
1556 }
1557
1558 /*
1559  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1560  * and '0' otherwise
1561  */
1562 int
1563 check_path (struct vectors * vecs, struct path * pp, int ticks)
1564 {
1565         int newstate;
1566         int new_path_up = 0;
1567         int chkr_new_path_up = 0;
1568         int add_active;
1569         int disable_reinstate = 0;
1570         int oldchkrstate = pp->chkrstate;
1571         int retrigger_tries, checkint;
1572         struct config *conf;
1573         int ret;
1574
1575         if ((pp->initialized == INIT_OK ||
1576              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1577                 return 0;
1578
1579         if (pp->tick)
1580                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1581         if (pp->tick)
1582                 return 0; /* don't check this path yet */
1583
1584         conf = get_multipath_config();
1585         retrigger_tries = conf->retrigger_tries;
1586         checkint = conf->checkint;
1587         put_multipath_config(conf);
1588         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1589             pp->retriggers < retrigger_tries) {
1590                 condlog(2, "%s: triggering change event to reinitialize",
1591                         pp->dev);
1592                 pp->initialized = INIT_REQUESTED_UDEV;
1593                 pp->retriggers++;
1594                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1595                                      strlen("change"));
1596                 return 0;
1597         }
1598
1599         /*
1600          * schedule the next check as soon as possible,
1601          * in case we exit abnormally from here
1602          */
1603         pp->tick = checkint;
1604
1605         newstate = path_offline(pp);
1606         /*
1607          * Wait for uevent for removed paths;
1608          * some LLDDs like zfcp keep paths unavailable
1609          * without sending uevents.
1610          */
1611         if (newstate == PATH_REMOVED)
1612                 newstate = PATH_DOWN;
1613
1614         if (newstate == PATH_UP) {
1615                 conf = get_multipath_config();
1616                 newstate = get_state(pp, conf, 1, newstate);
1617                 put_multipath_config(conf);
1618         } else
1619                 checker_clear_message(&pp->checker);
1620
1621         if (pp->wwid_changed) {
1622                 condlog(2, "%s: path wwid has changed. Refusing to use",
1623                         pp->dev);
1624                 newstate = PATH_DOWN;
1625         }
1626
1627         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1628                 condlog(2, "%s: unusable path", pp->dev);
1629                 conf = get_multipath_config();
1630                 pathinfo(pp, conf, 0);
1631                 put_multipath_config(conf);
1632                 return 1;
1633         }
1634         if (!pp->mpp) {
1635                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1636                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1637                         condlog(2, "%s: add missing path", pp->dev);
1638                         conf = get_multipath_config();
1639                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1640                         if (ret == PATHINFO_OK) {
1641                                 ev_add_path(pp, vecs, 1);
1642                                 pp->tick = 1;
1643                         } else if (ret == PATHINFO_SKIPPED) {
1644                                 put_multipath_config(conf);
1645                                 return -1;
1646                         }
1647                         put_multipath_config(conf);
1648                 }
1649                 return 0;
1650         }
1651         /*
1652          * Async IO in flight. Keep the previous path state
1653          * and reschedule as soon as possible
1654          */
1655         if (newstate == PATH_PENDING) {
1656                 pp->tick = 1;
1657                 return 0;
1658         }
1659         /*
1660          * Synchronize with kernel state
1661          */
1662         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1663                 condlog(1, "%s: Could not synchronize with kernel state",
1664                         pp->dev);
1665                 pp->dmstate = PSTATE_UNDEF;
1666         }
1667         /* if update_multipath_strings orphaned the path, quit early */
1668         if (!pp->mpp)
1669                 return 0;
1670
1671         if (pp->io_err_disable_reinstate && hit_io_err_recheck_time(pp)) {
1672                 pp->state = PATH_SHAKY;
1673                 /*
1674                  * reschedule as soon as possible, so that this path can
1675                  * be recovered in time
1676                  */
1677                 pp->tick = 1;
1678                 return 1;
1679         }
1680
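             /*
              * The path came back while delay_wait_checks is in effect:
              * keep it in PATH_DELAYED until the remaining checks have
              * passed, unless the map has no active path left at all.
              */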
1681         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1682              pp->wait_checks > 0) {
1683                 if (pp->mpp->nr_active > 0) {
1684                         pp->state = PATH_DELAYED;
1685                         pp->wait_checks--;
1686                         return 1;
1687                 } else
1688                         pp->wait_checks = 0;
1689         }
1690
1691         /*
1692          * don't reinstate a failed path if it is in stand-by
1693          * and if target supports only implicit tpgs mode.
1694          * this will prevent unnecessary i/o by dm on stand-by
1695          * paths if there are no other active paths in map.
1696          */
1697         disable_reinstate = (newstate == PATH_GHOST &&
1698                             pp->mpp->nr_active == 0 &&
1699                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1700
1701         pp->chkrstate = newstate;
1702         if (newstate != pp->state) {
1703                 int oldstate = pp->state;
1704                 pp->state = newstate;
1705
1706                 LOG_MSG(1, checker_message(&pp->checker));
1707
1708                 /*
1709                  * upon state change, reset the checkint
1710                  * to the shortest delay
1711                  */
1712                 conf = get_multipath_config();
1713                 pp->checkint = conf->checkint;
1714                 put_multipath_config(conf);
1715
1716                 if (newstate != PATH_UP && newstate != PATH_GHOST) {
1717                         /*
1718                          * proactively fail path in the DM
1719                          */
1720                         if (oldstate == PATH_UP ||
1721                             oldstate == PATH_GHOST) {
1722                                 fail_path(pp, 1);
1723                                 if (pp->mpp->delay_wait_checks > 0 &&
1724                                     pp->watch_checks > 0) {
1725                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1726                                         pp->watch_checks = 0;
1727                                 }
1728                         } else
1729                                 fail_path(pp, 0);
1730
1731                         /*
1732                          * cancel scheduled failback
1733                          */
1734                         pp->mpp->failback_tick = 0;
1735
1736                         pp->mpp->stat_path_failures++;
1737                         repair_path(pp);
1738                         return 1;
1739                 }
1740
1741                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1742                         if (pp->mpp && pp->mpp->prflag) {
1743                                 /*
1744                                  * Check Persistent Reservation.
1745                                  */
1746                                 condlog(2, "%s: checking persistent reservation "
1747                                         "registration", pp->dev);
1748                                 mpath_pr_event_handle(pp);
1749                         }
1750                 }
1751
1752                 /*
1753                  * reinstate this path
1754                  */
1755                 if (oldstate != PATH_UP &&
1756                     oldstate != PATH_GHOST) {
1757                         if (pp->mpp->delay_watch_checks > 0)
1758                                 pp->watch_checks = pp->mpp->delay_watch_checks;
1759                         add_active = 1;
1760                 } else {
1761                         if (pp->watch_checks > 0)
1762                                 pp->watch_checks--;
1763                         add_active = 0;
1764                 }
1765                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1766                         condlog(3, "%s: reload map", pp->dev);
1767                         ev_add_path(pp, vecs, 1);
1768                         pp->tick = 1;
1769                         return 0;
1770                 }
1771                 new_path_up = 1;
1772
1773                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1774                         chkr_new_path_up = 1;
1775
1776                 /*
1777                  * if at least one path is up in a group, and
1778                  * the group is disabled, re-enable it
1779                  */
1780                 if (newstate == PATH_UP)
1781                         enable_group(pp);
1782         }
1783         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1784                 if ((pp->dmstate == PSTATE_FAILED ||
1785                     pp->dmstate == PSTATE_UNDEF) &&
1786                     !disable_reinstate) {
1787                         /* Clear IO errors */
1788                         if (reinstate_path(pp, 0)) {
1789                                 condlog(3, "%s: reload map", pp->dev);
1790                                 ev_add_path(pp, vecs, 1);
1791                                 pp->tick = 1;
1792                                 return 0;
1793                         }
1794                 } else {
1795                         unsigned int max_checkint;
1796                         LOG_MSG(4, checker_message(&pp->checker));
1797                         conf = get_multipath_config();
1798                         max_checkint = conf->max_checkint;
1799                         put_multipath_config(conf);
1800                         if (pp->checkint != max_checkint) {
1801                                 /*
1802                                  * double the next check delay.
1803                                  * max at conf->max_checkint
1804                                  */
1805                                 if (pp->checkint < (max_checkint / 2))
1806                                         pp->checkint = 2 * pp->checkint;
1807                                 else
1808                                         pp->checkint = max_checkint;
1809
1810                                 condlog(4, "%s: delay next check %is",
1811                                         pp->dev_t, pp->checkint);
1812                         }
1813                         if (pp->watch_checks > 0)
1814                                 pp->watch_checks--;
1815                         pp->tick = pp->checkint;
1816                 }
1817         }
1818         else if (newstate != PATH_UP && newstate != PATH_GHOST) {
1819                 if (pp->dmstate == PSTATE_ACTIVE ||
1820                     pp->dmstate == PSTATE_UNDEF)
1821                         fail_path(pp, 0);
1822                 if (newstate == PATH_DOWN) {
1823                         int log_checker_err;
1824
1825                         conf = get_multipath_config();
1826                         log_checker_err = conf->log_checker_err;
1827                         put_multipath_config(conf);
1828                         if (log_checker_err == LOG_CHKR_ERR_ONCE)
1829                                 LOG_MSG(3, checker_message(&pp->checker));
1830                         else
1831                                 LOG_MSG(2, checker_message(&pp->checker));
1832                 }
1833         }
1834
1835         pp->state = newstate;
1836         repair_path(pp);
1837
1838         if (pp->mpp->wait_for_udev)
1839                 return 1;
1840         /*
1841          * path prio refreshing
1842          */
1843         condlog(4, "path prio refresh");
1844
1845         if (update_prio(pp, new_path_up) &&
1846             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1847              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1848                 update_path_groups(pp->mpp, vecs, !new_path_up);
1849         else if (need_switch_pathgroup(pp->mpp, 0)) {
1850                 if (pp->mpp->pgfailback > 0 &&
1851                     (new_path_up || pp->mpp->failback_tick <= 0))
1852                         pp->mpp->failback_tick =
1853                                 pp->mpp->pgfailback + 1;
1854                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1855                          (chkr_new_path_up && followover_should_failback(pp)))
1856                         switch_pathgroup(pp->mpp);
1857         }
1858         return 1;
1859 }
1860
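     /*
      * Initialize every path's check interval to the configured base
      * checkint.
      */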
1861 static void init_path_check_interval(struct vectors *vecs)
1862 {
1863         struct config *conf;
1864         struct path *pp;
1865         unsigned int i;
1866
1867         vector_foreach_slot (vecs->pathvec, pp, i) {
1868                 conf = get_multipath_config();
1869                 pp->checkint = conf->checkint;
1870                 put_multipath_config(conf);
1871         }
1872 }
1873
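     /*
      * Main path checker thread: roughly once per second check all
      * paths, run the per-map timers (deferred failback, no_path_retry,
      * missing uevent and ghost delay) and periodically garbage-collect
      * orphaned maps.
      */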
1874 static void *
1875 checkerloop (void *ap)
1876 {
1877         struct vectors *vecs;
1878         struct path *pp;
1879         int count = 0;
1880         unsigned int i;
1881         struct timespec last_time;
1882         struct config *conf;
1883
1884         pthread_cleanup_push(rcu_unregister, NULL);
1885         rcu_register_thread();
1886         mlockall(MCL_CURRENT | MCL_FUTURE);
1887         vecs = (struct vectors *)ap;
1888         condlog(2, "path checkers start up");
1889
1890         /* Tweak start time for initial path check */
1891         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
1892                 last_time.tv_sec = 0;
1893         else
1894                 last_time.tv_sec -= 1;
1895
1896         while (1) {
1897                 struct timespec diff_time, start_time, end_time;
1898                 int num_paths = 0, ticks = 0, strict_timing, rc = 0;
1899
1900                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
1901                         start_time.tv_sec = 0;
1902                 if (start_time.tv_sec && last_time.tv_sec) {
1903                         timespecsub(&start_time, &last_time, &diff_time);
1904                         condlog(4, "tick (%lu.%06lu secs)",
1905                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
1906                         last_time = start_time;
1907                         ticks = diff_time.tv_sec;
1908                 } else {
1909                         ticks = 1;
1910                         condlog(4, "tick (%d ticks)", ticks);
1911                 }
1912 #ifdef USE_SYSTEMD
1913                 if (use_watchdog)
1914                         sd_notify(0, "WATCHDOG=1");
1915 #endif
1916                 rc = set_config_state(DAEMON_RUNNING);
1917                 if (rc == ETIMEDOUT) {
1918                         condlog(4, "timeout waiting for DAEMON_IDLE");
1919                         continue;
1920                 }
1921
1922                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1923                 lock(&vecs->lock);
1924                 pthread_testcancel();
1925                 vector_foreach_slot (vecs->pathvec, pp, i) {
1926                         rc = check_path(vecs, pp, ticks);
1927                         if (rc < 0) {
1928                                 vector_del_slot(vecs->pathvec, i);
1929                                 free_path(pp);
1930                                 i--;
1931                         } else
1932                                 num_paths += rc;
1933                 }
1934                 lock_cleanup_pop(vecs->lock);
1935
1936                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1937                 lock(&vecs->lock);
1938                 pthread_testcancel();
1939                 defered_failback_tick(vecs->mpvec);
1940                 retry_count_tick(vecs->mpvec);
1941                 missing_uev_wait_tick(vecs);
1942                 ghost_delay_tick(vecs);
1943                 lock_cleanup_pop(vecs->lock);
1944
1945                 if (count)
1946                         count--;
1947                 else {
1948                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1949                         lock(&vecs->lock);
1950                         pthread_testcancel();
1951                         condlog(4, "map garbage collection");
1952                         mpvec_garbage_collector(vecs);
1953                         count = MAPGCINT;
1954                         lock_cleanup_pop(vecs->lock);
1955                 }
1956
1957                 diff_time.tv_nsec = 0;
1958                 if (start_time.tv_sec &&
1959                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
1960                         timespecsub(&end_time, &start_time, &diff_time);
1961                         if (num_paths) {
1962                                 unsigned int max_checkint;
1963
1964                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
1965                                         num_paths, num_paths > 1 ? "s" : "",
1966                                         diff_time.tv_sec,
1967                                         diff_time.tv_nsec / 1000);
1968                                 conf = get_multipath_config();
1969                                 max_checkint = conf->max_checkint;
1970                                 put_multipath_config(conf);
1971                                 if (diff_time.tv_sec > max_checkint)
1972                                         condlog(1, "path checkers took longer "
1973                                                 "than %lu seconds, consider "
1974                                                 "increasing max_polling_interval",
1975                                                 diff_time.tv_sec);
1976                         }
1977                 }
1978                 check_foreign();
1979                 post_config_state(DAEMON_IDLE);
1980                 conf = get_multipath_config();
1981                 strict_timing = conf->strict_timing;
1982                 put_multipath_config(conf);
1983                 if (!strict_timing)
1984                         sleep(1);
1985                 else {
1986                         if (diff_time.tv_nsec) {
1987                                 diff_time.tv_sec = 0;
1988                                 diff_time.tv_nsec =
1989                                      1000UL * 1000 * 1000 - diff_time.tv_nsec;
1990                         } else
1991                                 diff_time.tv_sec = 1;
1992
1993                         condlog(3, "waiting for %lu.%06lu secs",
1994                                 diff_time.tv_sec,
1995                                 diff_time.tv_nsec / 1000);
1996                         if (nanosleep(&diff_time, NULL) != 0) {
1997                                 condlog(3, "nanosleep failed with error %d",
1998                                         errno);
1999                                 conf = get_multipath_config();
2000                                 conf->strict_timing = 0;
2001                                 put_multipath_config(conf);
2002                                 break;
2003                         }
2004                 }
2005         }
2006         pthread_cleanup_pop(1);
2007         return NULL;
2008 }
2009
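     /*
      * Build the path and map vectors from scratch: discover paths and
      * maps, coalesce paths into multipath maps, push the result into
      * device-mapper and start a dm event waiter thread per map.
      */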
2010 int
2011 configure (struct vectors * vecs)
2012 {
2013         struct multipath * mpp;
2014         struct path * pp;
2015         vector mpvec;
2016         int i, ret;
2017         struct config *conf;
2018         static int force_reload = FORCE_RELOAD_WEAK;
2019
2020         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2021                 condlog(0, "couldn't allocate path vec in configure");
2022                 return 1;
2023         }
2024
2025         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2026                 condlog(0, "couldn't allocate multipath vec in configure");
2027                 return 1;
2028         }
2029
2030         if (!(mpvec = vector_alloc())) {
2031                 condlog(0, "couldn't allocate new maps vec in configure");
2032                 return 1;
2033         }
2034
2035         /*
2036          * probe for current path (from sysfs) and map (from dm) sets
2037          */
2038         ret = path_discovery(vecs->pathvec, DI_ALL);
2039         if (ret < 0) {
2040                 condlog(0, "configure failed at path discovery");
2041                 return 1;
2042         }
2043
2044         vector_foreach_slot (vecs->pathvec, pp, i){
2045                 conf = get_multipath_config();
2046                 if (filter_path(conf, pp) > 0){
2047                         vector_del_slot(vecs->pathvec, i);
2048                         free_path(pp);
2049                         i--;
2050                 }
2051                 else
2052                         pp->checkint = conf->checkint;
2053                 put_multipath_config(conf);
2054         }
2055         if (map_discovery(vecs)) {
2056                 condlog(0, "configure failed at map discovery");
2057                 return 1;
2058         }
2059
2060         /*
2061          * create new set of maps & push changed ones into dm
2062          * In the first call, use FORCE_RELOAD_WEAK to avoid making
2063          * superfluous ACT_RELOAD ioctls. Later calls are done
2064          * with FORCE_RELOAD_YES.
2065          */
2066         ret = coalesce_paths(vecs, mpvec, NULL, force_reload, CMD_NONE);
2067         if (force_reload == FORCE_RELOAD_WEAK)
2068                 force_reload = FORCE_RELOAD_YES;
2069         if (ret) {
2070                 condlog(0, "configure failed while coalescing paths");
2071                 return 1;
2072         }
2073
2074         /*
2075          * may need to remove some maps which are no longer relevant
2076          * e.g., due to blacklist changes in conf file
2077          */
2078         if (coalesce_maps(vecs, mpvec)) {
2079                 condlog(0, "configure failed while coalescing maps");
2080                 return 1;
2081         }
2082
2083         dm_lib_release();
2084
2085         sync_maps_state(mpvec);
2086         vector_foreach_slot(mpvec, mpp, i){
2087                 remember_wwid(mpp->wwid);
2088                 update_map_pr(mpp);
2089         }
2090
2091         /*
2092          * purge dm of old maps
2093          */
2094         remove_maps(vecs);
2095
2096         /*
2097          * save new set of maps formed by considering current path state
2098          */
2099         vector_free(vecs->mpvec);
2100         vecs->mpvec = mpvec;
2101
2102         /*
2103          * start dm event waiter threads for these new maps
2104          */
2105         vector_foreach_slot(vecs->mpvec, mpp, i) {
2106                 if (setup_multipath(vecs, mpp)) {
2107                         i--;
2108                         continue;
2109                 }
2110                 if (start_waiter_thread(mpp, vecs)) {
2111                         remove_map(mpp, vecs, 1);
2112                         i--;
2113                 }
2114         }
2115         return 0;
2116 }
2117
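     /*
      * Reconfiguration must be delayed while any map is still waiting
      * for udev to finish processing its events.
      */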
2118 int
2119 need_to_delay_reconfig(struct vectors * vecs)
2120 {
2121         struct multipath *mpp;
2122         int i;
2123
2124         if (!VECTOR_SIZE(vecs->mpvec))
2125                 return 0;
2126
2127         vector_foreach_slot(vecs->mpvec, mpp, i) {
2128                 if (mpp->wait_for_udev)
2129                         return 1;
2130         }
2131         return 0;
2132 }
2133
2134 void rcu_free_config(struct rcu_head *head)
2135 {
2136         struct config *conf = container_of(head, struct config, rcu);
2137
2138         free_config(conf);
2139 }
2140
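     /*
      * Reload multipath.conf, drop all existing maps and paths, publish
      * the new configuration via RCU and run a full configure() pass.
      */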
2141 int
2142 reconfigure (struct vectors * vecs)
2143 {
2144         struct config * old, *conf;
2145
2146         conf = load_config(DEFAULT_CONFIGFILE);
2147         if (!conf)
2148                 return 1;
2149
2150         /*
2151          * free old map and path vectors ... they use old conf state
2152          */
2153         if (VECTOR_SIZE(vecs->mpvec))
2154                 remove_maps_and_stop_waiters(vecs);
2155
2156         free_pathvec(vecs->pathvec, FREE_PATHS);
2157         vecs->pathvec = NULL;
2158         delete_all_foreign();
2159
2160         /* Re-read any timezone changes */
2161         tzset();
2162
2163         dm_drv_version(conf->version, TGT_MPATH);
2164         if (verbosity)
2165                 conf->verbosity = verbosity;
2166         if (bindings_read_only)
2167                 conf->bindings_read_only = bindings_read_only;
2168         if (conf->find_multipaths) {
2169                 condlog(2, "find_multipaths is set: -n is implied");
2170                 ignore_new_devs = 1;
2171         }
2172         if (ignore_new_devs)
2173                 conf->ignore_new_devs = ignore_new_devs;
2174         uxsock_timeout = conf->uxsock_timeout;
2175
2176         old = rcu_dereference(multipath_conf);
2177         rcu_assign_pointer(multipath_conf, conf);
2178         call_rcu(&old->rcu, rcu_free_config);
2179
2180         configure(vecs);
2181
2182
2183         return 0;
2184 }
2185
2186 static struct vectors *
2187 init_vecs (void)
2188 {
2189         struct vectors * vecs;
2190
2191         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2192
2193         if (!vecs)
2194                 return NULL;
2195
2196         pthread_mutex_init(&vecs->lock.mutex, NULL);
2197
2198         return vecs;
2199 }
2200
2201 static void *
2202 signal_set(int signo, void (*func) (int))
2203 {
2204         int r;
2205         struct sigaction sig;
2206         struct sigaction osig;
2207
2208         sig.sa_handler = func;
2209         sigemptyset(&sig.sa_mask);
2210         sig.sa_flags = 0;
2211
2212         r = sigaction(signo, &sig, &osig);
2213
2214         if (r < 0)
2215                 return (SIG_ERR);
2216         else
2217                 return (osig.sa_handler);
2218 }
2219
2220 void
2221 handle_signals(bool nonfatal)
2222 {
2223         if (exit_sig) {
2224                 condlog(2, "exit (signal)");
2225                 exit_sig = 0;
2226                 exit_daemon();
2227         }
2228         if (!nonfatal)
2229                 return;
2230         if (reconfig_sig) {
2231                 condlog(2, "reconfigure (signal)");
2232                 set_config_state(DAEMON_CONFIGURE);
2233         }
2234         if (log_reset_sig) {
2235                 condlog(2, "reset log (signal)");
2236                 if (logsink == 1)
2237                         log_thread_reset();
2238         }
2239         reconfig_sig = 0;
2240         log_reset_sig = 0;
2241 }
2242
2243 static void
2244 sighup (int sig)
2245 {
2246         reconfig_sig = 1;
2247 }
2248
2249 static void
2250 sigend (int sig)
2251 {
2252         exit_sig = 1;
2253 }
2254
2255 static void
2256 sigusr1 (int sig)
2257 {
2258         log_reset_sig = 1;
2259 }
2260
2261 static void
2262 sigusr2 (int sig)
2263 {
2264         condlog(3, "SIGUSR2 received");
2265 }
2266
2267 static void
2268 signal_init(void)
2269 {
2270         sigset_t set;
2271
2272         /* block all signals */
2273         sigfillset(&set);
2274         /* SIGPIPE occurs if logging fails */
2275         sigdelset(&set, SIGPIPE);
2276         pthread_sigmask(SIG_SETMASK, &set, NULL);
2277
2278         /* Other signals will be unblocked in the uxlsnr thread */
2279         signal_set(SIGHUP, sighup);
2280         signal_set(SIGUSR1, sigusr1);
2281         signal_set(SIGUSR2, sigusr2);
2282         signal_set(SIGINT, sigend);
2283         signal_set(SIGTERM, sigend);
2284         signal_set(SIGPIPE, sigend);
2285 }
2286
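     /*
      * Give the daemon real-time round-robin scheduling so path checking
      * is not starved on a loaded system.
      */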
2287 static void
2288 setscheduler (void)
2289 {
2290         int res;
2291         static struct sched_param sched_param = {
2292                 .sched_priority = 99
2293         };
2294
2295         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2296
2297         if (res == -1)
2298                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2299         return;
2300 }
2301
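     /*
      * Make the daemon as unattractive as possible to the OOM killer,
      * unless systemd already provided an OOMScoreAdjust setting.
      */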
2302 static void
2303 set_oom_adj (void)
2304 {
2305 #ifdef OOM_SCORE_ADJ_MIN
2306         int retry = 1;
2307         char *file = "/proc/self/oom_score_adj";
2308         int score = OOM_SCORE_ADJ_MIN;
2309 #else
2310         int retry = 0;
2311         char *file = "/proc/self/oom_adj";
2312         int score = OOM_ADJUST_MIN;
2313 #endif
2314         FILE *fp;
2315         struct stat st;
2316         char *envp;
2317
2318         envp = getenv("OOMScoreAdjust");
2319         if (envp) {
2320                 condlog(3, "Using systemd provided OOMScoreAdjust");
2321                 return;
2322         }
2323         do {
2324                 if (stat(file, &st) == 0){
2325                         fp = fopen(file, "w");
2326                         if (!fp) {
2327                                 condlog(0, "couldn't fopen %s : %s", file,
2328                                         strerror(errno));
2329                                 return;
2330                         }
2331                         fprintf(fp, "%i", score);
2332                         fclose(fp);
2333                         return;
2334                 }
2335                 if (errno != ENOENT) {
2336                         condlog(0, "couldn't stat %s : %s", file,
2337                                 strerror(errno));
2338                         return;
2339                 }
2340 #ifdef OOM_ADJUST_MIN
2341                 file = "/proc/self/oom_adj";
2342                 score = OOM_ADJUST_MIN;
2343 #else
2344                 retry = 0;
2345 #endif
2346         } while (retry--);
2347         condlog(0, "couldn't adjust oom score");
2348 }
2349
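     /*
      * Body of the daemon process: load the configuration, start the
      * uevent, cli listener, checker and uevent dispatcher threads, then
      * loop on configure/idle state changes until shutdown is requested.
      */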
2350 static int
2351 child (void * param)
2352 {
2353         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
2354         pthread_attr_t log_attr, misc_attr, uevent_attr;
2355         struct vectors * vecs;
2356         struct multipath * mpp;
2357         int i;
2358 #ifdef USE_SYSTEMD
2359         unsigned long checkint;
2360         int startup_done = 0;
2361 #endif
2362         int rc;
2363         int pid_fd = -1;
2364         struct config *conf;
2365         char *envp;
2366
2367         mlockall(MCL_CURRENT | MCL_FUTURE);
2368         signal_init();
2369         rcu_init();
2370
2371         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2372         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2373         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2374         setup_thread_attr(&io_err_stat_attr, 32 * 1024, 0);
2375
2376         if (logsink == 1) {
2377                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2378                 log_thread_start(&log_attr);
2379                 pthread_attr_destroy(&log_attr);
2380         }
2381         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2382         if (pid_fd < 0) {
2383                 condlog(1, "failed to create pidfile");
2384                 if (logsink == 1)
2385                         log_thread_stop();
2386                 exit(1);
2387         }
2388
2389         post_config_state(DAEMON_START);
2390
2391         condlog(2, "--------start up--------");
2392         condlog(2, "read " DEFAULT_CONFIGFILE);
2393
2394         conf = load_config(DEFAULT_CONFIGFILE);
2395         if (!conf)
2396                 goto failed;
2397
2398         if (verbosity)
2399                 conf->verbosity = verbosity;
2400         if (bindings_read_only)
2401                 conf->bindings_read_only = bindings_read_only;
2402         if (ignore_new_devs)
2403                 conf->ignore_new_devs = ignore_new_devs;
2404         uxsock_timeout = conf->uxsock_timeout;
2405         rcu_assign_pointer(multipath_conf, conf);
2406         if (init_checkers(conf->multipath_dir)) {
2407                 condlog(0, "failed to initialize checkers");
2408                 goto failed;
2409         }
2410         if (init_prio(conf->multipath_dir)) {
2411                 condlog(0, "failed to initialize prioritizers");
2412                 goto failed;
2413         }
2414         /* Failing this is non-fatal */
2415
2416         init_foreign(conf->multipath_dir);
2417
2418         setlogmask(LOG_UPTO(conf->verbosity + 3));
2419
2420         envp = getenv("LimitNOFILE");
2421
2422         if (envp) {
2423                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2424         } else if (conf->max_fds) {
2425                 struct rlimit fd_limit;
2426
2427                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2428                         condlog(0, "can't get open fds limit: %s",
2429                                 strerror(errno));
2430                         fd_limit.rlim_cur = 0;
2431                         fd_limit.rlim_max = 0;
2432                 }
2433                 if (fd_limit.rlim_cur < conf->max_fds) {
2434                         fd_limit.rlim_cur = conf->max_fds;
2435                         if (fd_limit.rlim_max < conf->max_fds)
2436                                 fd_limit.rlim_max = conf->max_fds;
2437                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2438                                 condlog(0, "can't set open fds limit to "
2439                                         "%lu/%lu : %s",
2440                                         fd_limit.rlim_cur, fd_limit.rlim_max,
2441                                         strerror(errno));
2442                         } else {
2443                                 condlog(3, "set open fds limit to %lu/%lu",
2444                                         fd_limit.rlim_cur, fd_limit.rlim_max);
2445                         }
2446                 }
2447
2448         }
2449
2450         vecs = gvecs = init_vecs();
2451         if (!vecs)
2452                 goto failed;
2453
2454         setscheduler();
2455         set_oom_adj();
2456
2457 #ifdef USE_SYSTEMD
2458         envp = getenv("WATCHDOG_USEC");
2459         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2460                 /* Value is in microseconds */
2461                 conf->max_checkint = checkint / 1000000;
2462                 /* Rescale checkint */
2463                 if (conf->checkint > conf->max_checkint)
2464                         conf->checkint = conf->max_checkint;
2465                 else
2466                         conf->checkint = conf->max_checkint / 4;
2467                 condlog(3, "enabling watchdog, interval %d max %d",
2468                         conf->checkint, conf->max_checkint);
2469                 use_watchdog = conf->checkint;
2470         }
2471 #endif
2472         /*
2473          * Startup done, invalidate configuration
2474          */
2475         conf = NULL;
2476
2477         /*
2478          * Signal start of configuration
2479          */
2480         post_config_state(DAEMON_CONFIGURE);
2481
2482         init_path_check_interval(vecs);
2483
2484         /*
2485          * Start uevent listener early to catch events
2486          */
2487         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2488                 condlog(0, "failed to create uevent thread: %d", rc);
2489                 goto failed;
2490         }
2491         pthread_attr_destroy(&uevent_attr);
2492         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2493                 condlog(0, "failed to create cli listener: %d", rc);
2494                 goto failed;
2495         }
2496
2497         /*
2498          * start threads
2499          */
2500         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2501                 condlog(0,"failed to create checker loop thread: %d", rc);
2502                 goto failed;
2503         }
2504         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2505                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2506                 goto failed;
2507         }
2508         pthread_attr_destroy(&misc_attr);
2509
2510         while (running_state != DAEMON_SHUTDOWN) {
2511                 pthread_cleanup_push(config_cleanup, NULL);
2512                 pthread_mutex_lock(&config_lock);
2513                 if (running_state != DAEMON_CONFIGURE &&
2514                     running_state != DAEMON_SHUTDOWN) {
2515                         pthread_cond_wait(&config_cond, &config_lock);
2516                 }
2517                 pthread_cleanup_pop(1);
2518                 if (running_state == DAEMON_CONFIGURE) {
2519                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2520                         lock(&vecs->lock);
2521                         pthread_testcancel();
2522                         if (!need_to_delay_reconfig(vecs)) {
2523                                 reconfigure(vecs);
2524                         } else {
2525                                 conf = get_multipath_config();
2526                                 conf->delayed_reconfig = 1;
2527                                 put_multipath_config(conf);
2528                         }
2529                         lock_cleanup_pop(vecs->lock);
2530                         post_config_state(DAEMON_IDLE);
2531 #ifdef USE_SYSTEMD
2532                         if (!startup_done) {
2533                                 sd_notify(0, "READY=1");
2534                                 startup_done = 1;
2535                         }
2536 #endif
2537                 }
2538         }
2539
2540         lock(&vecs->lock);
2541         conf = get_multipath_config();
2542         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2543                 vector_foreach_slot(vecs->mpvec, mpp, i)
2544                         dm_queue_if_no_path(mpp->alias, 0);
2545         put_multipath_config(conf);
2546         remove_maps_and_stop_waiters(vecs);
2547         unlock(&vecs->lock);
2548
2549         pthread_cancel(check_thr);
2550         pthread_cancel(uevent_thr);
2551         pthread_cancel(uxlsnr_thr);
2552         pthread_cancel(uevq_thr);
2553
2554         pthread_join(check_thr, NULL);
2555         pthread_join(uevent_thr, NULL);
2556         pthread_join(uxlsnr_thr, NULL);
2557         pthread_join(uevq_thr, NULL);
2558
2559         stop_io_err_stat_thread();
2560
2561         lock(&vecs->lock);
2562         free_pathvec(vecs->pathvec, FREE_PATHS);
2563         vecs->pathvec = NULL;
2564         unlock(&vecs->lock);
2565
2566         pthread_mutex_destroy(&vecs->lock.mutex);
2567         FREE(vecs);
2568         vecs = NULL;
2569
2570         cleanup_foreign();
2571         cleanup_checkers();
2572         cleanup_prio();
2573
2574         dm_lib_release();
2575         dm_lib_exit();
2576
2577         /* We're done here */
2578         condlog(3, "unlink pidfile");
2579         unlink(DEFAULT_PIDFILE);
2580
2581         condlog(2, "--------shut down-------");
2582
2583         if (logsink == 1)
2584                 log_thread_stop();
2585
2586         /*
2587          * Freeing config must be done after condlog() and dm_lib_exit(),
2588          * because logging functions like dlog() and dm_write_log()
2589          * reference the config.
2590          */
2591         conf = rcu_dereference(multipath_conf);
2592         rcu_assign_pointer(multipath_conf, NULL);
2593         call_rcu(&conf->rcu, rcu_free_config);
2594         udev_unref(udev);
2595         udev = NULL;
2596         pthread_attr_destroy(&waiter_attr);
2597         pthread_attr_destroy(&io_err_stat_attr);
2598 #ifdef _DEBUG_
2599         dbg_free_final(NULL);
2600 #endif
2601
2602 #ifdef USE_SYSTEMD
2603         sd_notify(0, "ERRNO=0");
2604 #endif
2605         exit(0);
2606
2607 failed:
2608 #ifdef USE_SYSTEMD
2609         sd_notify(0, "ERRNO=1");
2610 #endif
2611         if (pid_fd >= 0)
2612                 close(pid_fd);
2613         exit(1);
2614 }
2615
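     /*
      * Classic double fork: returns 0 in the surviving daemon process,
      * a positive pid in the original parent (which then exits), or -1
      * on error.
      */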
2616 static int
2617 daemonize(void)
2618 {
2619         int pid;
2620         int dev_null_fd;
2621
2622         if( (pid = fork()) < 0){
2623                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2624                 return -1;
2625         }
2626         else if (pid != 0)
2627                 return pid;
2628
2629         setsid();
2630
2631         if ( (pid = fork()) < 0)
2632                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2633         else if (pid != 0)
2634                 _exit(0);
2635
2636         if (chdir("/") < 0)
2637                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2638
2639         dev_null_fd = open("/dev/null", O_RDWR);
2640         if (dev_null_fd < 0){
2641                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2642                         strerror(errno));
2643                 _exit(0);
2644         }
2645
2646         close(STDIN_FILENO);
2647         if (dup(dev_null_fd) < 0) {
2648                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2649                         strerror(errno));
2650                 _exit(0);
2651         }
2652         close(STDOUT_FILENO);
2653         if (dup(dev_null_fd) < 0) {
2654                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2655                         strerror(errno));
2656                 _exit(0);
2657         }
2658         close(STDERR_FILENO);
2659         if (dup(dev_null_fd) < 0) {
2660                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2661                         strerror(errno));
2662                 _exit(0);
2663         }
2664         close(dev_null_fd);
2665         daemon_pid = getpid();
2666         return 0;
2667 }
2668
2669 int
2670 main (int argc, char *argv[])
2671 {
2672         extern char *optarg;
2673         extern int optind;
2674         int arg;
2675         int err;
2676         int foreground = 0;
2677         struct config *conf;
2678
2679         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2680                                    "Manipulated through RCU");
2681         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2682                 "Suppress complaints about unprotected running_state reads");
2683         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2684                 "Suppress complaints about this scalar variable");
2685
2686         logsink = 1;
2687
2688         if (getuid() != 0) {
2689                 fprintf(stderr, "need to be root\n");
2690                 exit(1);
2691         }
2692
2693         /* make sure we don't lock any path */
2694         if (chdir("/") < 0)
2695                 fprintf(stderr, "can't chdir to root directory : %s\n",
2696                         strerror(errno));
2697         umask(umask(077) | 022);
2698
2699         pthread_cond_init_mono(&config_cond);
2700
2701         udev = udev_new();
2702         libmp_udev_set_sync_support(0);
2703
2704         while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) {
2705                 switch(arg) {
2706                 case 'd':
2707                         foreground = 1;
2708                         if (logsink > 0)
2709                                 logsink = 0;
2710                         //debug=1; /* ### comment me out ### */
2711                         break;
2712                 case 'v':
2713                         if (sizeof(optarg) > sizeof(char *) ||
2714                             !isdigit(optarg[0]))
2715                                 exit(1);
2716
2717                         verbosity = atoi(optarg);
2718                         break;
2719                 case 's':
2720                         logsink = -1;
2721                         break;
2722                 case 'k':
2723                         conf = load_config(DEFAULT_CONFIGFILE);
2724                         if (!conf)
2725                                 exit(1);
2726                         if (verbosity)
2727                                 conf->verbosity = verbosity;
2728                         uxsock_timeout = conf->uxsock_timeout;
2729                         uxclnt(optarg, uxsock_timeout + 100);
2730                         free_config(conf);
2731                         exit(0);
2732                 case 'B':
2733                         bindings_read_only = 1;
2734                         break;
2735                 case 'n':
2736                         ignore_new_devs = 1;
2737                         break;
2738                 default:
2739                         fprintf(stderr, "Invalid argument '-%c'\n",
2740                                 optopt);
2741                         exit(1);
2742                 }
2743         }
2744         if (optind < argc) {
2745                 char cmd[CMDSIZE];
2746                 char * s = cmd;
2747                 char * c = s;
2748
2749                 conf = load_config(DEFAULT_CONFIGFILE);
2750                 if (!conf)
2751                         exit(1);
2752                 if (verbosity)
2753                         conf->verbosity = verbosity;
2754                 uxsock_timeout = conf->uxsock_timeout;
2755                 memset(cmd, 0x0, CMDSIZE);
2756                 while (optind < argc) {
2757                         if (strchr(argv[optind], ' '))
2758                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
2759                         else
2760                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
2761                         optind++;
2762                 }
2763                 c += snprintf(c, s + CMDSIZE - c, "\n");
2764                 uxclnt(s, uxsock_timeout + 100);
2765                 free_config(conf);
2766                 exit(0);
2767         }
2768
2769         if (foreground) {
2770                 if (!isatty(fileno(stdout)))
2771                         setbuf(stdout, NULL);
2772                 err = 0;
2773                 daemon_pid = getpid();
2774         } else
2775                 err = daemonize();
2776
2777         if (err < 0)
2778                 /* error */
2779                 exit(1);
2780         else if (err > 0)
2781                 /* parent dies */
2782                 exit(0);
2783         else
2784                 /* child lives */
2785                 return (child(NULL));
2786 }
2787
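     /*
      * Thread worker: read the persistent reservation keys from this
      * path and, if the map's reservation key is already registered on
      * the device, register it through this path as well.
      */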
2788 void *  mpath_pr_event_handler_fn (void * pathp )
2789 {
2790         struct multipath * mpp;
2791         int i, ret, isFound;
2792         struct path * pp = (struct path *)pathp;
2793         struct prout_param_descriptor *param;
2794         struct prin_resp *resp;
2795
2796         mpp = pp->mpp;
2797
2798         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2799         if (!resp){
2800                 condlog(0, "%s Alloc failed for prin response", pp->dev);
2801                 return NULL;
2802         }
2803
2804         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2805         if (ret != MPATH_PR_SUCCESS)
2806         {
2807                 condlog(0, "%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2808                 goto out;
2809         }
2810
2811         condlog(3, " event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
2812                         resp->prin_descriptor.prin_readkeys.additional_length);
2813
2814         if (resp->prin_descriptor.prin_readkeys.additional_length == 0)
2815         {
2816                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2817                 ret = MPATH_PR_SUCCESS;
2818                 goto out;
2819         }
2820         condlog(2, "Multipath  reservation_key: 0x%" PRIx64 " ",
2821                 get_be64(mpp->reservation_key));
2822
2823         isFound = 0;
2824         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++)
2825         {
2826                 condlog(2, "PR IN READKEYS[%d]  reservation key:", i);
2827                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8, -1);
2828                 if (!memcmp(&mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2829                 {
2830                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2831                         isFound = 1;
2832                         break;
2833                 }
2834         }
2835         if (!isFound)
2836         {
2837                 condlog(0, "%s: Either device not registered or ", pp->dev);
2838                 condlog(0, "host is not authorised for registration. Skip path");
2839                 ret = MPATH_PR_OTHER;
2840                 goto out;
2841         }
2842
2843         param = malloc(sizeof(struct prout_param_descriptor));
2844         memset(param, 0, sizeof(struct prout_param_descriptor));
2845         memcpy(param->sa_key, &mpp->reservation_key, 8);
2846         param->num_transportid = 0;
2847
2848         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
2849
2850         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2851         if (ret != MPATH_PR_SUCCESS)
2852         {
2853                 condlog(0, "%s: Reservation registration failed. Error: %d", pp->dev, ret);
2854         }
2855         mpp->prflag = 1;
2856
2857         free(param);
2858 out:
2859         free(resp);
2860         return NULL;
2861 }
2862
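     /*
      * Run the persistent reservation handler for a path that just came
      * back, but only if a reservation key is configured for the map.
      * The worker thread is joined, so this is effectively synchronous.
      */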
2863 int mpath_pr_event_handle(struct path *pp)
2864 {
2865         pthread_t thread;
2866         int rc;
2867         pthread_attr_t attr;
2868         struct multipath * mpp;
2869
2870         mpp = pp->mpp;
2871
2872         if (!get_be64(mpp->reservation_key))
2873                 return -1;
2874
2875         pthread_attr_init(&attr);
2876         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2877
2878         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2879         if (rc) {
2880                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2881                 return -1;
2882         }
2883         pthread_attr_destroy(&attr);
2884         rc = pthread_join(thread, NULL);
2885         return 0;
2886 }