multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <sys/wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <urcu.h>
21 #ifdef USE_SYSTEMD
22 #include <systemd/sd-daemon.h>
23 #endif
24 #include <semaphore.h>
25 #include <time.h>
26
27 /*
28  * libmultipath
29  */
30 #include "time-util.h"
31
32 /*
33  * libcheckers
34  */
35 #include "checkers.h"
36
37 #ifdef USE_SYSTEMD
38 static int use_watchdog;
39 #endif
40
41 int uxsock_timeout;
42
43 /*
44  * libmultipath
45  */
46 #include "parser.h"
47 #include "vector.h"
48 #include "memory.h"
49 #include "config.h"
50 #include "util.h"
51 #include "hwtable.h"
52 #include "defaults.h"
53 #include "structs.h"
54 #include "blacklist.h"
55 #include "structs_vec.h"
56 #include "dmparser.h"
57 #include "devmapper.h"
58 #include "sysfs.h"
59 #include "dict.h"
60 #include "discovery.h"
61 #include "debug.h"
62 #include "propsel.h"
63 #include "uevent.h"
64 #include "switchgroup.h"
65 #include "print.h"
66 #include "configure.h"
67 #include "prio.h"
68 #include "wwids.h"
69 #include "pgpolicies.h"
70 #include "uevent.h"
71 #include "log.h"
72
73 #include "mpath_cmd.h"
74 #include "mpath_persist.h"
75
76 #include "prioritizers/alua_rtpg.h"
77
78 #include "main.h"
79 #include "pidfile.h"
80 #include "uxlsnr.h"
81 #include "uxclnt.h"
82 #include "cli.h"
83 #include "cli_handlers.h"
84 #include "lock.h"
85 #include "waiter.h"
86 #include "wwids.h"
87 #include "../third-party/valgrind/drd.h"
88
89 #define FILE_NAME_SIZE 256
90 #define CMDSIZE 160
91
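/*
 * Log a checker message for a path. Expects a "struct path *pp" (with a
 * valid pp->mpp) to be in scope at the call site; reports the path as
 * offline, or appends the message text "b" when it is non-empty.
 */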
92 #define LOG_MSG(a, b) \
93 do { \
94         if (pp->offline) \
95                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
96         else if (strlen(b)) \
97                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
98 } while(0)
99
100 struct mpath_event_param
101 {
102         char * devname;
103         struct multipath *mpp;
104 };
105
106 unsigned int mpath_mx_alloc_len;
107
108 int logsink;
109 int verbosity;
110 int bindings_read_only;
111 int ignore_new_devs;
112 enum daemon_status running_state = DAEMON_INIT;
113 pid_t daemon_pid;
114 pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
115 pthread_cond_t config_cond;
116
117 /*
118  * global copy of vecs for use in sig handlers
119  */
120 struct vectors * gvecs;
121
122 struct udev * udev;
123
124 struct config *multipath_conf;
125
126 /* Local variables */
127 static volatile sig_atomic_t exit_sig;
128 static volatile sig_atomic_t reconfig_sig;
129 static volatile sig_atomic_t log_reset_sig;
130
131 const char *
132 daemon_status(void)
133 {
134         switch (running_state) {
135         case DAEMON_INIT:
136                 return "init";
137         case DAEMON_START:
138                 return "startup";
139         case DAEMON_CONFIGURE:
140                 return "configure";
141         case DAEMON_IDLE:
142                 return "idle";
143         case DAEMON_RUNNING:
144                 return "running";
145         case DAEMON_SHUTDOWN:
146                 return "shutdown";
147         }
148         return NULL;
149 }
150
151 /*
152  * I love you too, systemd ...
153  */
154 const char *
155 sd_notify_status(void)
156 {
157         switch (running_state) {
158         case DAEMON_INIT:
159                 return "STATUS=init";
160         case DAEMON_START:
161                 return "STATUS=startup";
162         case DAEMON_CONFIGURE:
163                 return "STATUS=configure";
164         case DAEMON_IDLE:
165                 return "STATUS=idle";
166         case DAEMON_RUNNING:
167                 return "STATUS=running";
168         case DAEMON_SHUTDOWN:
169                 return "STATUS=shutdown";
170         }
171         return NULL;
172 }
173
174 static void config_cleanup(void *arg)
175 {
176         pthread_mutex_unlock(&config_lock);
177 }
178
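/*
 * Daemon state machine helpers.
 *
 * post_config_state() switches running_state unconditionally and wakes any
 * waiters on config_cond; set_config_state() is the cooperative variant:
 * if the daemon is not idle it waits (up to one second) on config_cond
 * before applying the new state, and on timeout leaves the state unchanged
 * and returns the pthread_cond_timedwait() error code. Both notify systemd
 * of the new status when built with USE_SYSTEMD.
 */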
179 void post_config_state(enum daemon_status state)
180 {
181         pthread_mutex_lock(&config_lock);
182         if (state != running_state) {
183                 running_state = state;
184                 pthread_cond_broadcast(&config_cond);
185 #ifdef USE_SYSTEMD
186                 sd_notify(0, sd_notify_status());
187 #endif
188         }
189         pthread_mutex_unlock(&config_lock);
190 }
191
192 int set_config_state(enum daemon_status state)
193 {
194         int rc = 0;
195
196         pthread_cleanup_push(config_cleanup, NULL);
197         pthread_mutex_lock(&config_lock);
198         if (running_state != state) {
199                 if (running_state != DAEMON_IDLE) {
200                         struct timespec ts;
201
202                         clock_gettime(CLOCK_MONOTONIC, &ts);
203                         ts.tv_sec += 1;
204                         rc = pthread_cond_timedwait(&config_cond,
205                                                     &config_lock, &ts);
206                 }
207                 if (!rc) {
208                         running_state = state;
209                         pthread_cond_broadcast(&config_cond);
210 #ifdef USE_SYSTEMD
211                         sd_notify(0, sd_notify_status());
212 #endif
213                 }
214         }
215         pthread_cleanup_pop(1);
216         return rc;
217 }
218
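/*
 * RCU accessors for the global configuration. get_multipath_config() takes
 * the RCU read lock and returns the current struct config; every call must
 * be paired with put_multipath_config(), which drops the read lock again.
 * Typical usage:
 *
 *	conf = get_multipath_config();
 *	... read conf->some_setting ...
 *	put_multipath_config(conf);
 */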
219 struct config *get_multipath_config(void)
220 {
221         rcu_read_lock();
222         return rcu_dereference(multipath_conf);
223 }
224
225 void put_multipath_config(struct config *conf)
226 {
227         rcu_read_unlock();
228 }
229
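/*
 * Returns 1 if the map should switch to a different path group, i.e. if
 * select_path_group() picks a best group other than the current nextpg.
 * With "refresh" set, path priorities are re-read first. Maps configured
 * for manual failback (and maps without path groups) always return 0.
 */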
230 static int
231 need_switch_pathgroup (struct multipath * mpp, int refresh)
232 {
233         struct pathgroup * pgp;
234         struct path * pp;
235         unsigned int i, j;
236         struct config *conf;
237
238         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
239                 return 0;
240
241         /*
242          * Refresh path priority values
243          */
244         if (refresh) {
245                 vector_foreach_slot (mpp->pg, pgp, i) {
246                         vector_foreach_slot (pgp->paths, pp, j) {
247                                 conf = get_multipath_config();
248                                 pathinfo(pp, conf, DI_PRIO);
249                                 put_multipath_config(conf);
250                         }
251                 }
252         }
253
254         if (!mpp->pg || VECTOR_SIZE(mpp->paths) == 0)
255                 return 0;
256
257         mpp->bestpg = select_path_group(mpp);
258
259         if (mpp->bestpg != mpp->nextpg)
260                 return 1;
261
262         return 0;
263 }
264
265 static void
266 switch_pathgroup (struct multipath * mpp)
267 {
268         mpp->stat_switchgroup++;
269         dm_switchgroup(mpp->alias, mpp->bestpg);
270         condlog(2, "%s: switch to path group #%i",
271                  mpp->alias, mpp->bestpg);
272 }
273
274 static int
275 coalesce_maps(struct vectors *vecs, vector nmpv)
276 {
277         struct multipath * ompp;
278         vector ompv = vecs->mpvec;
279         unsigned int i, reassign_maps;
280         struct config *conf;
281
282         conf = get_multipath_config();
283         reassign_maps = conf->reassign_maps;
284         put_multipath_config(conf);
285         vector_foreach_slot (ompv, ompp, i) {
286                 condlog(3, "%s: coalesce map", ompp->alias);
287                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
288                         /*
289                          * remove all current maps not allowed by the
290                          * current configuration
291                          */
292                         if (dm_flush_map(ompp->alias)) {
293                                 condlog(0, "%s: unable to flush devmap",
294                                         ompp->alias);
295                                 /*
296                                  * may be just because the device is open
297                                  */
298                                 if (setup_multipath(vecs, ompp) != 0) {
299                                         i--;
300                                         continue;
301                                 }
302                                 if (!vector_alloc_slot(nmpv))
303                                         return 1;
304
305                                 vector_set_slot(nmpv, ompp);
306
307                                 vector_del_slot(ompv, i);
308                                 i--;
309                         }
310                         else {
311                                 dm_lib_release();
312                                 condlog(2, "%s devmap removed", ompp->alias);
313                         }
314                 } else if (reassign_maps) {
315                         condlog(3, "%s: Reassign existing device-mapper"
316                                 " devices", ompp->alias);
317                         dm_reassign(ompp->alias);
318                 }
319         }
320         return 0;
321 }
322
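/*
 * Push the daemon's view of path states into the kernel map: paths the
 * checker sees as up/ghost but the kernel marks failed are reinstated,
 * and paths seen as down/shaky but still active in the kernel are failed.
 * Unchecked, wild and delayed paths are left alone.
 */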
323 void
324 sync_map_state(struct multipath *mpp)
325 {
326         struct pathgroup *pgp;
327         struct path *pp;
328         unsigned int i, j;
329
330         if (!mpp->pg)
331                 return;
332
333         vector_foreach_slot (mpp->pg, pgp, i){
334                 vector_foreach_slot (pgp->paths, pp, j){
335                         if (pp->state == PATH_UNCHECKED ||
336                             pp->state == PATH_WILD ||
337                             pp->state == PATH_DELAYED)
338                                 continue;
339                         if ((pp->dmstate == PSTATE_FAILED ||
340                              pp->dmstate == PSTATE_UNDEF) &&
341                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
342                                 dm_reinstate_path(mpp->alias, pp->dev_t);
343                         else if ((pp->dmstate == PSTATE_ACTIVE ||
344                                   pp->dmstate == PSTATE_UNDEF) &&
345                                  (pp->state == PATH_DOWN ||
346                                   pp->state == PATH_SHAKY))
347                                 dm_fail_path(mpp->alias, pp->dev_t);
348                 }
349         }
350 }
351
352 static void
353 sync_maps_state(vector mpvec)
354 {
355         unsigned int i;
356         struct multipath *mpp;
357
358         vector_foreach_slot (mpvec, mpp, i)
359                 sync_map_state(mpp);
360 }
361
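/*
 * Flush (remove) a multipath map from the device mapper. With "nopaths"
 * set, dm_flush_map_nopaths() is used, which may turn the removal into a
 * deferred remove instead of failing while the device is open. On success
 * the paths are orphaned and the map plus its event waiter are dropped
 * from the daemon's vectors.
 */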
362 static int
363 flush_map(struct multipath * mpp, struct vectors * vecs, int nopaths)
364 {
365         int r;
366
367         if (nopaths)
368                 r = dm_flush_map_nopaths(mpp->alias, mpp->deferred_remove);
369         else
370                 r = dm_flush_map(mpp->alias);
371         /*
372          * references to this map are cleared below, so we can ignore the
373          * spurious uevent we may have generated with the dm_flush_map call above
374          */
375         if (r) {
376                 /*
377                  * May not really be an error -- if the map was already flushed
378                  * from the device mapper by dmsetup(8) for instance.
379                  */
380                 if (r == 1)
381                         condlog(0, "%s: can't flush", mpp->alias);
382                 else {
383                         condlog(2, "%s: devmap deferred remove", mpp->alias);
384                         mpp->deferred_remove = DEFERRED_REMOVE_IN_PROGRESS;
385                 }
386                 return r;
387         }
388         else {
389                 dm_lib_release();
390                 condlog(2, "%s: map flushed", mpp->alias);
391         }
392
393         orphan_paths(vecs->pathvec, mpp);
394         remove_map_and_stop_waiter(mpp, vecs, 1);
395
396         return 0;
397 }
398
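/*
 * Re-adopt paths and reload an existing map, retrying the reload up to
 * three times before giving up. Returns 1 only if the final
 * setup_multipath() call fails (in which case the map has been removed);
 * a failed reload by itself is merely logged.
 */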
399 int
400 update_map (struct multipath *mpp, struct vectors *vecs)
401 {
402         int retries = 3;
403         char params[PARAMS_SIZE] = {0};
404
405 retry:
406         condlog(4, "%s: updating new map", mpp->alias);
407         if (adopt_paths(vecs->pathvec, mpp)) {
408                 condlog(0, "%s: failed to adopt paths for new map update",
409                         mpp->alias);
410                 retries = -1;
411                 goto fail;
412         }
413         verify_paths(mpp, vecs);
414         mpp->flush_on_last_del = FLUSH_UNDEF;
415         mpp->action = ACT_RELOAD;
416
417         if (setup_map(mpp, params, PARAMS_SIZE)) {
418                 condlog(0, "%s: failed to setup new map in update", mpp->alias);
419                 retries = -1;
420                 goto fail;
421         }
422         if (domap(mpp, params, 1) <= 0 && retries-- > 0) {
423                 condlog(0, "%s: map_update sleep", mpp->alias);
424                 sleep(1);
425                 goto retry;
426         }
427         dm_lib_release();
428
429 fail:
430         if (setup_multipath(vecs, mpp))
431                 return 1;
432
433         sync_map_state(mpp);
434
435         if (retries < 0)
436                 condlog(0, "%s: failed reload in new map update", mpp->alias);
437         return 0;
438 }
439
440 static int
441 uev_add_map (struct uevent * uev, struct vectors * vecs)
442 {
443         char *alias;
444         int major = -1, minor = -1, rc;
445
446         condlog(3, "%s: add map (uevent)", uev->kernel);
447         alias = uevent_get_dm_name(uev);
448         if (!alias) {
449                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
450                 major = uevent_get_major(uev);
451                 minor = uevent_get_minor(uev);
452                 alias = dm_mapname(major, minor);
453                 if (!alias) {
454                         condlog(2, "%s: mapname not found for %d:%d",
455                                 uev->kernel, major, minor);
456                         return 1;
457                 }
458         }
459         pthread_cleanup_push(cleanup_lock, &vecs->lock);
460         lock(&vecs->lock);
461         pthread_testcancel();
462         rc = ev_add_map(uev->kernel, alias, vecs);
463         lock_cleanup_pop(vecs->lock);
464         FREE(alias);
465         return rc;
466 }
467
468 int
469 ev_add_map (char * dev, char * alias, struct vectors * vecs)
470 {
471         char * refwwid;
472         struct multipath * mpp;
473         int map_present;
474         int r = 1, delayed_reconfig, reassign_maps;
475         struct config *conf;
476
477         map_present = dm_map_present(alias);
478
479         if (map_present && !dm_is_mpath(alias)) {
480                 condlog(4, "%s: not a multipath map", alias);
481                 return 0;
482         }
483
484         mpp = find_mp_by_alias(vecs->mpvec, alias);
485
486         if (mpp) {
487                 if (mpp->wait_for_udev > 1) {
488                         if (update_map(mpp, vecs))
489                                 /* setup_multipath removed the map */
490                                 return 1;
491                 }
492                 conf = get_multipath_config();
493                 delayed_reconfig = conf->delayed_reconfig;
494                 reassign_maps = conf->reassign_maps;
495                 put_multipath_config(conf);
496                 if (mpp->wait_for_udev) {
497                         mpp->wait_for_udev = 0;
498                         if (delayed_reconfig &&
499                             !need_to_delay_reconfig(vecs)) {
500                                 condlog(2, "reconfigure (delayed)");
501                                 set_config_state(DAEMON_CONFIGURE);
502                                 return 0;
503                         }
504                 }
505                 /*
506                  * Not really an error -- we generate our own uevent
507                  * if we create a multipath mapped device as a result
508                  * of uev_add_path
509                  */
510                 if (reassign_maps) {
511                         condlog(3, "%s: Reassign existing device-mapper devices",
512                                 alias);
513                         dm_reassign(alias);
514                 }
515                 return 0;
516         }
517         condlog(2, "%s: adding map", alias);
518
519         /*
520          * now we can register the map
521          */
522         if (map_present) {
523                 if ((mpp = add_map_without_path(vecs, alias))) {
524                         sync_map_state(mpp);
525                         condlog(2, "%s: devmap %s registered", alias, dev);
526                         return 0;
527                 } else {
528                         condlog(2, "%s: uev_add_map failed", dev);
529                         return 1;
530                 }
531         }
532         r = get_refwwid(CMD_NONE, dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
533
534         if (refwwid) {
535                 r = coalesce_paths(vecs, NULL, refwwid, 0, CMD_NONE);
536                 dm_lib_release();
537         }
538
539         if (!r)
540                 condlog(2, "%s: devmap %s added", alias, dev);
541         else if (r == 2)
542                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
543         else
544                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
545
546         FREE(refwwid);
547         return r;
548 }
549
550 static int
551 uev_remove_map (struct uevent * uev, struct vectors * vecs)
552 {
553         char *alias;
554         int minor;
555         struct multipath *mpp;
556
557         condlog(2, "%s: remove map (uevent)", uev->kernel);
558         alias = uevent_get_dm_name(uev);
559         if (!alias) {
560                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
561                 return 0;
562         }
563         minor = uevent_get_minor(uev);
564
565         pthread_cleanup_push(cleanup_lock, &vecs->lock);
566         lock(&vecs->lock);
567         pthread_testcancel();
568         mpp = find_mp_by_minor(vecs->mpvec, minor);
569
570         if (!mpp) {
571                 condlog(2, "%s: devmap not registered, can't remove",
572                         uev->kernel);
573                 goto out;
574         }
575         if (strcmp(mpp->alias, alias)) {
576                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
577                         mpp->alias, mpp->dmi->minor, minor);
578                 goto out;
579         }
580
581         orphan_paths(vecs->pathvec, mpp);
582         remove_map_and_stop_waiter(mpp, vecs, 1);
583 out:
584         lock_cleanup_pop(vecs->lock);
585         FREE(alias);
586         return 0;
587 }
588
589 /* Called from CLI handler */
590 int
591 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
592 {
593         struct multipath * mpp;
594
595         mpp = find_mp_by_minor(vecs->mpvec, minor);
596
597         if (!mpp) {
598                 condlog(2, "%s: devmap not registered, can't remove",
599                         devname);
600                 return 1;
601         }
602         if (strcmp(mpp->alias, alias)) {
603                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
604                         mpp->alias, mpp->dmi->minor, minor);
605                 return 1;
606         }
607         return flush_map(mpp, vecs, 0);
608 }
609
610 static int
611 uev_add_path (struct uevent *uev, struct vectors * vecs)
612 {
613         struct path *pp;
614         int ret = 0, i;
615         struct config *conf;
616
617         condlog(2, "%s: add path (uevent)", uev->kernel);
618         if (strstr(uev->kernel, "..") != NULL) {
619                 /*
620                  * Don't allow relative device names in the pathvec
621                  */
622                 condlog(0, "%s: path name is invalid", uev->kernel);
623                 return 1;
624         }
625
626         pthread_cleanup_push(cleanup_lock, &vecs->lock);
627         lock(&vecs->lock);
628         pthread_testcancel();
629         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
630         if (pp) {
631                 int r;
632
633                 condlog(0, "%s: spurious uevent, path already in pathvec",
634                         uev->kernel);
635                 if (!pp->mpp && !strlen(pp->wwid)) {
636                         condlog(3, "%s: reinitialize path", uev->kernel);
637                         udev_device_unref(pp->udev);
638                         pp->udev = udev_device_ref(uev->udev);
639                         conf = get_multipath_config();
640                         r = pathinfo(pp, conf,
641                                      DI_ALL | DI_BLACKLIST);
642                         put_multipath_config(conf);
643                         if (r == PATHINFO_OK)
644                                 ret = ev_add_path(pp, vecs);
645                         else if (r == PATHINFO_SKIPPED) {
646                                 condlog(3, "%s: remove blacklisted path",
647                                         uev->kernel);
648                                 i = find_slot(vecs->pathvec, (void *)pp);
649                                 if (i != -1)
650                                         vector_del_slot(vecs->pathvec, i);
651                                 free_path(pp);
652                         } else {
653                                 condlog(0, "%s: failed to reinitialize path",
654                                         uev->kernel);
655                                 ret = 1;
656                         }
657                 }
658         }
659         lock_cleanup_pop(vecs->lock);
660         if (pp)
661                 return ret;
662
663         /*
664          * get path vital state
665          */
666         conf = get_multipath_config();
667         ret = alloc_path_with_pathinfo(conf, uev->udev,
668                                        DI_ALL, &pp);
669         put_multipath_config(conf);
670         if (!pp) {
671                 if (ret == PATHINFO_SKIPPED)
672                         return 0;
673                 condlog(3, "%s: failed to get path info", uev->kernel);
674                 return 1;
675         }
676         pthread_cleanup_push(cleanup_lock, &vecs->lock);
677         lock(&vecs->lock);
678         pthread_testcancel();
679         ret = store_path(vecs->pathvec, pp);
680         if (!ret) {
681                 conf = get_multipath_config();
682                 pp->checkint = conf->checkint;
683                 put_multipath_config(conf);
684                 ret = ev_add_path(pp, vecs);
685         } else {
686                 condlog(0, "%s: failed to store path info, "
687                         "dropping event",
688                         uev->kernel);
689                 free_path(pp);
690                 ret = 1;
691         }
692         lock_cleanup_pop(vecs->lock);
693         return ret;
694 }
695
696 /*
697  * returns:
698  * 0: added
699  * 1: error
700  */
701 int
702 ev_add_path (struct path * pp, struct vectors * vecs)
703 {
704         struct multipath * mpp;
705         char params[PARAMS_SIZE] = {0};
706         int retries = 3;
707         int start_waiter = 0;
708         int ret;
709
710         /*
711          * need path UID to go any further
712          */
713         if (strlen(pp->wwid) == 0) {
714                 condlog(0, "%s: failed to get path uid", pp->dev);
715                 goto fail; /* leave path added to pathvec */
716         }
717         mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
718         if (mpp && mpp->wait_for_udev &&
719             (pathcount(mpp, PATH_UP) > 0 ||
720              (pathcount(mpp, PATH_GHOST) > 0 && pp->tpgs != TPGS_IMPLICIT))) {
721                 /* if wait_for_udev is set and valid paths exist */
722                 mpp->wait_for_udev = 2;
723                 orphan_path(pp, "waiting for create to complete");
724                 return 0;
725         }
726
727         pp->mpp = mpp;
728 rescan:
729         if (mpp) {
730                 if (pp->size && mpp->size != pp->size) {
731                         condlog(0, "%s: failed to add new path %s, "
732                                 "device size mismatch",
733                                 mpp->alias, pp->dev);
734                         int i = find_slot(vecs->pathvec, (void *)pp);
735                         if (i != -1)
736                                 vector_del_slot(vecs->pathvec, i);
737                         free_path(pp);
738                         return 1;
739                 }
740
741                 condlog(4,"%s: adopting all paths for path %s",
742                         mpp->alias, pp->dev);
743                 if (adopt_paths(vecs->pathvec, mpp))
744                         goto fail; /* leave path added to pathvec */
745
746                 verify_paths(mpp, vecs);
747                 mpp->flush_on_last_del = FLUSH_UNDEF;
748                 mpp->action = ACT_RELOAD;
749         } else {
750                 if (!should_multipath(pp, vecs->pathvec)) {
751                         orphan_path(pp, "only one path");
752                         return 0;
753                 }
754                 condlog(4,"%s: creating new map", pp->dev);
755                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
756                         mpp->action = ACT_CREATE;
757                         /*
758                          * We don't depend on ACT_CREATE, as domap will
759                          * set it to ACT_NOTHING when complete.
760                          */
761                         start_waiter = 1;
762                 }
763                 if (!start_waiter)
764                         goto fail; /* leave path added to pathvec */
765         }
766
767         /* persistent reservation check */
768         mpath_pr_event_handle(pp);
769
770         /*
771          * push the map to the device-mapper
772          */
773         if (setup_map(mpp, params, PARAMS_SIZE)) {
774                 condlog(0, "%s: failed to setup map for addition of new "
775                         "path %s", mpp->alias, pp->dev);
776                 goto fail_map;
777         }
778         /*
779          * reload the map for the multipath mapped device
780          */
781 retry:
782         ret = domap(mpp, params, 1);
783         if (ret <= 0) {
784                 if (ret < 0 && retries-- > 0) {
785                         condlog(0, "%s: retry domap for addition of new "
786                                 "path %s", mpp->alias, pp->dev);
787                         sleep(1);
788                         goto retry;
789                 }
790                 condlog(0, "%s: failed in domap for addition of new "
791                         "path %s", mpp->alias, pp->dev);
792                 /*
793                  * deal with asynchronous uevents :((
794                  */
795                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
796                         condlog(0, "%s: ev_add_path sleep", mpp->alias);
797                         sleep(1);
798                         update_mpp_paths(mpp, vecs->pathvec);
799                         goto rescan;
800                 }
801                 else if (mpp->action == ACT_RELOAD)
802                         condlog(0, "%s: giving up reload", mpp->alias);
803                 else
804                         goto fail_map;
805         }
806         dm_lib_release();
807
808         /*
809          * update our state from kernel regardless of create or reload
810          */
811         if (setup_multipath(vecs, mpp))
812                 goto fail; /* if setup_multipath fails, it removes the map */
813
814         sync_map_state(mpp);
815
816         if ((mpp->action == ACT_CREATE ||
817              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
818             start_waiter_thread(mpp, vecs))
819                         goto fail_map;
820
821         if (retries >= 0) {
822                 condlog(2, "%s [%s]: path added to devmap %s",
823                         pp->dev, pp->dev_t, mpp->alias);
824                 return 0;
825         } else
826                 goto fail;
827
828 fail_map:
829         remove_map(mpp, vecs, 1);
830 fail:
831         orphan_path(pp, "failed to add path");
832         return 1;
833 }
834
835 static int
836 uev_remove_path (struct uevent *uev, struct vectors * vecs)
837 {
838         struct path *pp;
839         int ret;
840
841         condlog(2, "%s: remove path (uevent)", uev->kernel);
842         pthread_cleanup_push(cleanup_lock, &vecs->lock);
843         lock(&vecs->lock);
844         pthread_testcancel();
845         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
846         if (pp)
847                 ret = ev_remove_path(pp, vecs);
848         lock_cleanup_pop(vecs->lock);
849         if (!pp) {
850                 /* Not an error; path might have been purged earlier */
851                 condlog(0, "%s: path already removed", uev->kernel);
852                 return 0;
853         }
854         return ret;
855 }
856
857 int
858 ev_remove_path (struct path *pp, struct vectors * vecs)
859 {
860         struct multipath * mpp;
861         int i, retval = 0;
862         char params[PARAMS_SIZE] = {0};
863
864         /*
865          * avoid referring to the map of an orphaned path
866          */
867         if ((mpp = pp->mpp)) {
868                 /*
869                  * transform the mp->pg vector of vectors of paths
870                  * into a mp->params string to feed the device-mapper
871                  */
872                 if (update_mpp_paths(mpp, vecs->pathvec)) {
873                         condlog(0, "%s: failed to update paths",
874                                 mpp->alias);
875                         goto fail;
876                 }
877                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
878                         vector_del_slot(mpp->paths, i);
879
880                 /*
881                  * remove the map IFF removing the last path
882                  */
883                 if (VECTOR_SIZE(mpp->paths) == 0) {
884                         char alias[WWID_SIZE];
885
886                         /*
887                          * flush_map will fail if the device is open
888                          */
889                         strncpy(alias, mpp->alias, WWID_SIZE);
890                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
891                                 condlog(2, "%s: Last path deleted, disabling queueing", mpp->alias);
892                                 mpp->retry_tick = 0;
893                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
894                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
895                                 mpp->stat_map_failures++;
896                                 dm_queue_if_no_path(mpp->alias, 0);
897                         }
898                         if (!flush_map(mpp, vecs, 1)) {
899                                 condlog(2, "%s: removed map after"
900                                         " removing all paths",
901                                         alias);
902                                 retval = 0;
903                                 goto out;
904                         }
905                         /*
906                          * Not an error, continue
907                          */
908                 }
909
910                 if (setup_map(mpp, params, PARAMS_SIZE)) {
911                         condlog(0, "%s: failed to setup map for"
912                                 " removal of path %s", mpp->alias, pp->dev);
913                         goto fail;
914                 }
915
916                 if (mpp->wait_for_udev) {
917                         mpp->wait_for_udev = 2;
918                         goto out;
919                 }
920
921                 /*
922                  * reload the map
923                  */
924                 mpp->action = ACT_RELOAD;
925                 if (domap(mpp, params, 1) <= 0) {
926                         condlog(0, "%s: failed in domap for "
927                                 "removal of path %s",
928                                 mpp->alias, pp->dev);
929                         retval = 1;
930                 } else {
931                         /*
932                          * update our state from kernel
933                          */
934                         if (setup_multipath(vecs, mpp))
935                                 return 1;
936                         sync_map_state(mpp);
937
938                         condlog(2, "%s [%s]: path removed from map %s",
939                                 pp->dev, pp->dev_t, mpp->alias);
940                 }
941         }
942
943 out:
944         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
945                 vector_del_slot(vecs->pathvec, i);
946
947         free_path(pp);
948
949         return retval;
950
951 fail:
952         remove_map_and_stop_waiter(mpp, vecs, 1);
953         return 1;
954 }
955
956 static int
957 uev_update_path (struct uevent *uev, struct vectors * vecs)
958 {
959         int ro, retval = 0;
960         struct path * pp;
961         struct config *conf;
962         int disable_changed_wwids;
963
964         conf = get_multipath_config();
965         disable_changed_wwids = conf->disable_changed_wwids;
966         put_multipath_config(conf);
967
968         ro = uevent_get_disk_ro(uev);
969
970         pthread_cleanup_push(cleanup_lock, &vecs->lock);
971         lock(&vecs->lock);
972         pthread_testcancel();
973
974         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
975         if (pp) {
976                 struct multipath *mpp = pp->mpp;
977
978                 if (disable_changed_wwids &&
979                     (strlen(pp->wwid) || pp->wwid_changed)) {
980                         char wwid[WWID_SIZE];
981
982                         strcpy(wwid, pp->wwid);
983                         get_uid(pp, pp->state, uev->udev);
984                         if (strcmp(wwid, pp->wwid) != 0) {
985                                 condlog(0, "%s: path wwid changed from '%s' to '%s'. disallowing", uev->kernel, wwid, pp->wwid);
986                                 strcpy(pp->wwid, wwid);
987                                 if (!pp->wwid_changed) {
988                                         pp->wwid_changed = 1;
989                                         pp->tick = 1;
990                                         dm_fail_path(pp->mpp->alias, pp->dev_t);
991                                 }
992                                 goto out;
993                         } else
994                                 pp->wwid_changed = 0;
995                 }
996
997                 if (pp->initialized == INIT_REQUESTED_UDEV)
998                         retval = uev_add_path(uev, vecs);
999                 else if (mpp && ro >= 0) {
1000                         condlog(2, "%s: update path write_protect to '%d' (uevent)", uev->kernel, ro);
1001
1002                         if (mpp->wait_for_udev)
1003                                 mpp->wait_for_udev = 2;
1004                         else {
1005                                 retval = reload_map(vecs, mpp, 0, 1);
1006                                 condlog(2, "%s: map %s reloaded (retval %d)",
1007                                         uev->kernel, mpp->alias, retval);
1008                         }
1009                 }
1010         }
1011 out:
1012         lock_cleanup_pop(vecs->lock);
1013         if (!pp) {
1014                 /* If the path is blacklisted, print a debug/non-default verbosity message. */
1015                 if (uev->udev) {
1016                         int flag = DI_SYSFS | DI_WWID;
1017
1018                         conf = get_multipath_config();
1019                         retval = alloc_path_with_pathinfo(conf, uev->udev, flag, NULL);
1020                         put_multipath_config(conf);
1021
1022                         if (retval == PATHINFO_SKIPPED) {
1023                                 condlog(3, "%s: spurious uevent, path is blacklisted", uev->kernel);
1024                                 return 0;
1025                         }
1026                 }
1027
1028                 condlog(0, "%s: spurious uevent, path not found", uev->kernel);
1029         }
1030
1031         return retval;
1032 }
1033
1034 static int
1035 map_discovery (struct vectors * vecs)
1036 {
1037         struct multipath * mpp;
1038         unsigned int i;
1039
1040         if (dm_get_maps(vecs->mpvec))
1041                 return 1;
1042
1043         vector_foreach_slot (vecs->mpvec, mpp, i)
1044                 if (setup_multipath(vecs, mpp))
1045                         i--;
1046
1047         return 0;
1048 }
1049
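/*
 * Callback invoked by the unix socket listener for each CLI command.
 * parse_cmd() fills in the reply; a positive return is mapped to a
 * "timeout\n" or "fail\n" reply, and success with no payload becomes
 * "ok\n". A negative return leaves *reply untouched.
 */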
1050 int
1051 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
1052 {
1053         struct vectors * vecs;
1054         int r;
1055
1056         *reply = NULL;
1057         *len = 0;
1058         vecs = (struct vectors *)trigger_data;
1059
1060         r = parse_cmd(str, reply, len, vecs, uxsock_timeout / 1000);
1061
1062         if (r > 0) {
1063                 if (r == ETIMEDOUT)
1064                         *reply = STRDUP("timeout\n");
1065                 else
1066                         *reply = STRDUP("fail\n");
1067                 *len = strlen(*reply) + 1;
1068                 r = 1;
1069         }
1070         else if (!r && *len == 0) {
1071                 *reply = STRDUP("ok\n");
1072                 *len = strlen(*reply) + 1;
1073                 r = 0;
1074         }
1075         /* else if (r < 0) leave *reply alone */
1076
1077         return r;
1078 }
1079
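/*
 * Filter uevents by devpath: keep whole block devices and discard
 * partitions (anything with a second component after "/block/").
 * Returns 1 when the event should be ignored.
 */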
1080 static int
1081 uev_discard(char * devpath)
1082 {
1083         char *tmp;
1084         char a[11], b[11];
1085
1086         /*
1087          * keep only block devices, discard partitions
1088          */
1089         tmp = strstr(devpath, "/block/");
1090         if (tmp == NULL){
1091                 condlog(4, "no /block/ in '%s'", devpath);
1092                 return 1;
1093         }
1094         if (sscanf(tmp, "/block/%10s", a) != 1 ||
1095             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
1096                 condlog(4, "discard event on %s", devpath);
1097                 return 1;
1098         }
1099         return 0;
1100 }
1101
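/*
 * Main uevent dispatcher. Partition events are discarded, then the thread
 * waits until the daemon is idle or running (bailing out on shutdown).
 * "dm-*" change/remove events are routed to the map handlers (add events
 * on dm devices are ignored); everything else that passes the devnode
 * blacklist goes to the path add/remove/change handlers.
 */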
1102 int
1103 uev_trigger (struct uevent * uev, void * trigger_data)
1104 {
1105         int r = 0;
1106         struct vectors * vecs;
1107         struct config *conf;
1108
1109         vecs = (struct vectors *)trigger_data;
1110
1111         if (uev_discard(uev->devpath))
1112                 return 0;
1113
1114         pthread_cleanup_push(config_cleanup, NULL);
1115         pthread_mutex_lock(&config_lock);
1116         if (running_state != DAEMON_IDLE &&
1117             running_state != DAEMON_RUNNING)
1118                 pthread_cond_wait(&config_cond, &config_lock);
1119         pthread_cleanup_pop(1);
1120
1121         if (running_state == DAEMON_SHUTDOWN)
1122                 return 0;
1123
1124         /*
1125          * device map event
1126          * Add events are ignored here as the tables
1127          * are not fully initialised then.
1128          */
1129         if (!strncmp(uev->kernel, "dm-", 3)) {
1130                 if (!strncmp(uev->action, "change", 6)) {
1131                         r = uev_add_map(uev, vecs);
1132                         goto out;
1133                 }
1134                 if (!strncmp(uev->action, "remove", 6)) {
1135                         r = uev_remove_map(uev, vecs);
1136                         goto out;
1137                 }
1138                 goto out;
1139         }
1140
1141         /*
1142          * path add/remove event
1143          */
1144         conf = get_multipath_config();
1145         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
1146                            uev->kernel) > 0) {
1147                 put_multipath_config(conf);
1148                 goto out;
1149         }
1150         put_multipath_config(conf);
1151
1152         if (!strncmp(uev->action, "add", 3)) {
1153                 r = uev_add_path(uev, vecs);
1154                 goto out;
1155         }
1156         if (!strncmp(uev->action, "remove", 6)) {
1157                 r = uev_remove_path(uev, vecs);
1158                 goto out;
1159         }
1160         if (!strncmp(uev->action, "change", 6)) {
1161                 r = uev_update_path(uev, vecs);
1162                 goto out;
1163         }
1164
1165 out:
1166         return r;
1167 }
1168
1169 static void rcu_unregister(void *param)
1170 {
1171         rcu_unregister_thread();
1172 }
1173
1174 static void *
1175 ueventloop (void * ap)
1176 {
1177         struct udev *udev = ap;
1178
1179         pthread_cleanup_push(rcu_unregister, NULL);
1180         rcu_register_thread();
1181         if (uevent_listen(udev))
1182                 condlog(0, "error starting uevent listener");
1183         pthread_cleanup_pop(1);
1184         return NULL;
1185 }
1186
1187 static void *
1188 uevqloop (void * ap)
1189 {
1190         pthread_cleanup_push(rcu_unregister, NULL);
1191         rcu_register_thread();
1192         if (uevent_dispatch(&uev_trigger, ap))
1193                 condlog(0, "error starting uevent dispatcher");
1194         pthread_cleanup_pop(1);
1195         return NULL;
1196 }
1197 static void *
1198 uxlsnrloop (void * ap)
1199 {
1200         if (cli_init()) {
1201                 condlog(1, "Failed to init uxsock listener");
1202                 return NULL;
1203         }
1204         pthread_cleanup_push(rcu_unregister, NULL);
1205         rcu_register_thread();
1206         set_handler_callback(LIST+PATHS, cli_list_paths);
1207         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
1208         set_handler_callback(LIST+PATHS+RAW+FMT, cli_list_paths_raw);
1209         set_handler_callback(LIST+PATH, cli_list_path);
1210         set_handler_callback(LIST+MAPS, cli_list_maps);
1211         set_unlocked_handler_callback(LIST+STATUS, cli_list_status);
1212         set_unlocked_handler_callback(LIST+DAEMON, cli_list_daemon);
1213         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
1214         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
1215         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
1216         set_handler_callback(LIST+MAPS+RAW+FMT, cli_list_maps_raw);
1217         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
1218         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
1219         set_handler_callback(LIST+MAPS+JSON, cli_list_maps_json);
1220         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
1221         set_handler_callback(LIST+MAP+FMT, cli_list_map_fmt);
1222         set_handler_callback(LIST+MAP+RAW+FMT, cli_list_map_fmt);
1223         set_handler_callback(LIST+MAP+JSON, cli_list_map_json);
1224         set_handler_callback(LIST+CONFIG, cli_list_config);
1225         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
1226         set_handler_callback(LIST+DEVICES, cli_list_devices);
1227         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
1228         set_handler_callback(ADD+PATH, cli_add_path);
1229         set_handler_callback(DEL+PATH, cli_del_path);
1230         set_handler_callback(ADD+MAP, cli_add_map);
1231         set_handler_callback(DEL+MAP, cli_del_map);
1232         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
1233         set_unlocked_handler_callback(RECONFIGURE, cli_reconfigure);
1234         set_handler_callback(SUSPEND+MAP, cli_suspend);
1235         set_handler_callback(RESUME+MAP, cli_resume);
1236         set_handler_callback(RESIZE+MAP, cli_resize);
1237         set_handler_callback(RELOAD+MAP, cli_reload);
1238         set_handler_callback(RESET+MAP, cli_reassign);
1239         set_handler_callback(REINSTATE+PATH, cli_reinstate);
1240         set_handler_callback(FAIL+PATH, cli_fail);
1241         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
1242         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
1243         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
1244         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
1245         set_unlocked_handler_callback(QUIT, cli_quit);
1246         set_unlocked_handler_callback(SHUTDOWN, cli_shutdown);
1247         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
1248         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
1249         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
1250         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
1251         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
1252
1253         umask(077);
1254         uxsock_listen(&uxsock_trigger, ap);
1255         pthread_cleanup_pop(1);
1256         return NULL;
1257 }
1258
1259 void
1260 exit_daemon (void)
1261 {
1262         post_config_state(DAEMON_SHUTDOWN);
1263 }
1264
1265 static void
1266 fail_path (struct path * pp, int del_active)
1267 {
1268         if (!pp->mpp)
1269                 return;
1270
1271         condlog(2, "checker failed path %s in map %s",
1272                  pp->dev_t, pp->mpp->alias);
1273
1274         dm_fail_path(pp->mpp->alias, pp->dev_t);
1275         if (del_active)
1276                 update_queue_mode_del_path(pp->mpp);
1277 }
1278
1279 /*
1280  * caller must have locked the path list before calling that function
1281  */
1282 static int
1283 reinstate_path (struct path * pp, int add_active)
1284 {
1285         int ret = 0;
1286
1287         if (!pp->mpp)
1288                 return 0;
1289
1290         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t)) {
1291                 condlog(0, "%s: reinstate failed", pp->dev_t);
1292                 ret = 1;
1293         } else {
1294                 condlog(2, "%s: reinstated", pp->dev_t);
1295                 if (add_active)
1296                         update_queue_mode_add_path(pp->mpp);
1297         }
1298         return ret;
1299 }
1300
1301 static void
1302 enable_group(struct path * pp)
1303 {
1304         struct pathgroup * pgp;
1305
1306         /*
1307          * if path is added through uev_add_path, pgindex can be unset.
1308          * next update_strings() will set it, upon map reload event.
1309          *
1310          * we can safely return here, because upon map reload, all
1311          * PG will be enabled.
1312          */
1313         if (!pp->mpp->pg || !pp->pgindex)
1314                 return;
1315
1316         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1317
1318         if (pgp->status == PGSTATE_DISABLED) {
1319                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
1320                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
1321         }
1322 }
1323
1324 static void
1325 mpvec_garbage_collector (struct vectors * vecs)
1326 {
1327         struct multipath * mpp;
1328         unsigned int i;
1329
1330         if (!vecs->mpvec)
1331                 return;
1332
1333         vector_foreach_slot (vecs->mpvec, mpp, i) {
1334                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1335                         condlog(2, "%s: remove dead map", mpp->alias);
1336                         remove_map_and_stop_waiter(mpp, vecs, 1);
1337                         i--;
1338                 }
1339         }
1340 }
1341
1342 /* This is called after a path has started working again. If the multipath
1343  * device for this path uses the followover failback type, and this is the
1344  * best pathgroup, and this is the first path in the pathgroup to come back
1345  * up, then switch to this pathgroup */
1346 static int
1347 followover_should_failback(struct path * pp)
1348 {
1349         struct pathgroup * pgp;
1350         struct path *pp1;
1351         int i;
1352
1353         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1354             !pp->mpp->pg || !pp->pgindex ||
1355             pp->pgindex != pp->mpp->bestpg)
1356                 return 0;
1357
1358         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1359         vector_foreach_slot(pgp->paths, pp1, i) {
1360                 if (pp1 == pp)
1361                         continue;
1362                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1363                         return 0;
1364         }
1365         return 1;
1366 }
1367
1368 static void
1369 missing_uev_wait_tick(struct vectors *vecs)
1370 {
1371         struct multipath * mpp;
1372         unsigned int i;
1373         int timed_out = 0, delayed_reconfig;
1374         struct config *conf;
1375
1376         vector_foreach_slot (vecs->mpvec, mpp, i) {
1377                 if (mpp->wait_for_udev && --mpp->uev_wait_tick <= 0) {
1378                         timed_out = 1;
1379                         condlog(0, "%s: timeout waiting on creation uevent. enabling reloads", mpp->alias);
1380                         if (mpp->wait_for_udev > 1 && update_map(mpp, vecs)) {
1381                                 /* update_map removed map */
1382                                 i--;
1383                                 continue;
1384                         }
1385                         mpp->wait_for_udev = 0;
1386                 }
1387         }
1388
1389         conf = get_multipath_config();
1390         delayed_reconfig = conf->delayed_reconfig;
1391         put_multipath_config(conf);
1392         if (timed_out && delayed_reconfig &&
1393             !need_to_delay_reconfig(vecs)) {
1394                 condlog(2, "reconfigure (delayed)");
1395                 set_config_state(DAEMON_CONFIGURE);
1396         }
1397 }
1398
1399 static void
1400 defered_failback_tick (vector mpvec)
1401 {
1402         struct multipath * mpp;
1403         unsigned int i;
1404
1405         vector_foreach_slot (mpvec, mpp, i) {
1406                 /*
1407                  * deferred failback is getting closer
1408                  */
1409                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1410                         mpp->failback_tick--;
1411
1412                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1413                                 switch_pathgroup(mpp);
1414                 }
1415         }
1416 }
1417
1418 static void
1419 retry_count_tick(vector mpvec)
1420 {
1421         struct multipath *mpp;
1422         unsigned int i;
1423
1424         vector_foreach_slot (mpvec, mpp, i) {
1425                 if (mpp->retry_tick > 0) {
1426                         mpp->stat_total_queueing_time++;
1427                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1428                         if(--mpp->retry_tick == 0) {
1429                                 mpp->stat_map_failures++;
1430                                 dm_queue_if_no_path(mpp->alias, 0);
1431                                 condlog(2, "%s: Disable queueing", mpp->alias);
1432                         }
1433                 }
1434         }
1435 }
1436
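/*
 * Refresh the priority of a path (or, with refresh_all, of every path in
 * its map) and return 1 if any priority changed, 0 otherwise.
 */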
1437 int update_prio(struct path *pp, int refresh_all)
1438 {
1439         int oldpriority;
1440         struct path *pp1;
1441         struct pathgroup * pgp;
1442         int i, j, changed = 0;
1443         struct config *conf;
1444
1445         if (refresh_all) {
1446                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1447                         vector_foreach_slot (pgp->paths, pp1, j) {
1448                                 oldpriority = pp1->priority;
1449                                 conf = get_multipath_config();
1450                                 pathinfo(pp1, conf, DI_PRIO);
1451                                 put_multipath_config(conf);
1452                                 if (pp1->priority != oldpriority)
1453                                         changed = 1;
1454                         }
1455                 }
1456                 return changed;
1457         }
1458         oldpriority = pp->priority;
1459         conf = get_multipath_config();
1460         pathinfo(pp, conf, DI_PRIO);
1461         put_multipath_config(conf);
1462
1463         if (pp->priority == oldpriority)
1464                 return 0;
1465         return 1;
1466 }
1467
1468 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1469 {
1470         if (reload_map(vecs, mpp, refresh, 1))
1471                 return 1;
1472
1473         dm_lib_release();
1474         if (setup_multipath(vecs, mpp) != 0)
1475                 return 1;
1476         sync_map_state(mpp);
1477
1478         return 0;
1479 }
1480
1481 void repair_path(struct path * pp)
1482 {
1483         if (pp->state != PATH_DOWN)
1484                 return;
1485
1486         checker_repair(&pp->checker);
1487         LOG_MSG(1, checker_message(&pp->checker));
1488 }
1489
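/*
 * san_path_err_* handling: count path failures, slowly forget them on a
 * healthy path, and once the threshold is exceeded keep the path from
 * being reinstated until san_path_err_recovery_time has elapsed.
 * Returns 1 while reinstatement should be delayed, 0 otherwise. Only
 * active when all three san_path_err_* options are configured.
 */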
1490 static int check_path_reinstate_state(struct path * pp) {
1491         struct timespec curr_time;
1492         if (!((pp->mpp->san_path_err_threshold > 0) &&
1493               (pp->mpp->san_path_err_forget_rate > 0) &&
1494               (pp->mpp->san_path_err_recovery_time > 0))) {
1495                 return 0;
1496         }
1497
1498         if (pp->disable_reinstate) {
1499                 /* If we don't know how much time has passed, automatically
1500                  * reinstate the path, just to be safe. Also, if there are
1501                  * no other usable paths, reinstate the path
1502                  */
1503                 if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0 ||
1504                                 pp->mpp->nr_active == 0) {
1505                         condlog(2, "%s : reinstating path early", pp->dev);
1506                         goto reinstate_path;
1507                 }
1508                 if ((curr_time.tv_sec - pp->dis_reinstate_time ) > pp->mpp->san_path_err_recovery_time) {
1509                         condlog(2,"%s : reinstate the path after err recovery time", pp->dev);
1510                         goto reinstate_path;
1511                 }
1512                 return 1;
1513         }
1514         /* forget errors on a working path */
1515         if ((pp->state == PATH_UP || pp->state == PATH_GHOST) &&
1516                         pp->path_failures > 0) {
1517                 if (pp->san_path_err_forget_rate > 0){
1518                         pp->san_path_err_forget_rate--;
1519                 } else {
1520                         /* for every san_path_err_forget_rate number of
1521                          * successful path checks decrement path_failures by 1
1522                          */
1523                         pp->path_failures--;
1524                         pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1525                 }
1526                 return 0;
1527         }
1528
1529         /* If the path isn't recovering from a failed state, do nothing */
1530         if (pp->state != PATH_DOWN && pp->state != PATH_SHAKY &&
1531                         pp->state != PATH_TIMEOUT)
1532                 return 0;
1533
1534         if (pp->path_failures == 0)
1535                 pp->san_path_err_forget_rate = pp->mpp->san_path_err_forget_rate;
1536
1537         pp->path_failures++;
1538
1539         /* if we don't know the current time, we don't know how long to
1540          * delay the path, so there's no point in checking if we should
1541          */
1542
1543         if (clock_gettime(CLOCK_MONOTONIC, &curr_time) != 0)
1544                 return 0;
1545         /* when path_failures has exceeded the san_path_err_threshold,
1546          * place the path in a delayed state until san_path_err_recovery_time
1547          * has passed, so that the customer can rectify the issue within
1548          * this time. After san_path_err_recovery_time has elapsed, the
1549          * path is automatically reinstated.
1550          */
1551         if (pp->path_failures > pp->mpp->san_path_err_threshold) {
1552                 condlog(2, "%s : hit error threshold. Delaying path reinstatement", pp->dev);
1553                 pp->dis_reinstate_time = curr_time.tv_sec;
1554                 pp->disable_reinstate = 1;
1555                 return 1;
1556         } else {
1557                 return 0;
1558         }
1559
1560 reinstate_path:
1561         pp->path_failures = 0;
1562         pp->disable_reinstate = 0;
1563         pp->san_path_err_forget_rate = 0;
1564         return 0;
1565 }
1566
1567 /*
1568  * Returns '1' if the path has been checked, '-1' if it was blacklisted
1569  * and '0' otherwise
1570  */
1571 int
1572 check_path (struct vectors * vecs, struct path * pp, int ticks)
1573 {
1574         int newstate;
1575         int new_path_up = 0;
1576         int chkr_new_path_up = 0;
1577         int add_active;
1578         int disable_reinstate = 0;
1579         int oldchkrstate = pp->chkrstate;
1580         int retrigger_tries, checkint;
1581         struct config *conf;
1582         int ret;
1583
1584         if ((pp->initialized == INIT_OK ||
1585              pp->initialized == INIT_REQUESTED_UDEV) && !pp->mpp)
1586                 return 0;
1587
1588         if (pp->tick)
1589                 pp->tick -= (pp->tick > ticks) ? ticks : pp->tick;
1590         if (pp->tick)
1591                 return 0; /* don't check this path yet */
1592
1593         conf = get_multipath_config();
1594         retrigger_tries = conf->retrigger_tries;
1595         checkint = conf->checkint;
1596         put_multipath_config(conf);
1597         if (!pp->mpp && pp->initialized == INIT_MISSING_UDEV &&
1598             pp->retriggers < retrigger_tries) {
1599                 condlog(2, "%s: triggering change event to reinitialize",
1600                         pp->dev);
1601                 pp->initialized = INIT_REQUESTED_UDEV;
1602                 pp->retriggers++;
1603                 sysfs_attr_set_value(pp->udev, "uevent", "change",
1604                                      strlen("change"));
1605                 return 0;
1606         }
1607
1608         /*
1609          * schedule the next check as soon as possible,
1610          * in case we exit abnormally from here
1611          */
1612         pp->tick = checkint;
1613
1614         newstate = path_offline(pp);
1615         /*
1616          * Wait for uevent for removed paths;
1617          * some LLDDs like zfcp keep paths unavailable
1618          * without sending uevents.
1619          */
1620         if (newstate == PATH_REMOVED)
1621                 newstate = PATH_DOWN;
1622
1623         if (newstate == PATH_UP) {
1624                 conf = get_multipath_config();
1625                 newstate = get_state(pp, conf, 1);
1626                 put_multipath_config(conf);
1627         } else
1628                 checker_clear_message(&pp->checker);
1629
1630         if (pp->wwid_changed) {
1631                 condlog(2, "%s: path wwid has changed. Refusing to use",
1632                         pp->dev);
1633                 newstate = PATH_DOWN;
1634         }
1635
1636         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1637                 condlog(2, "%s: unusable path", pp->dev);
1638                 conf = get_multipath_config();
1639                 pathinfo(pp, conf, 0);
1640                 put_multipath_config(conf);
1641                 return 1;
1642         }
1643         if (!pp->mpp) {
1644                 if (!strlen(pp->wwid) && pp->initialized != INIT_MISSING_UDEV &&
1645                     (newstate == PATH_UP || newstate == PATH_GHOST)) {
1646                         condlog(2, "%s: add missing path", pp->dev);
1647                         conf = get_multipath_config();
1648                         ret = pathinfo(pp, conf, DI_ALL | DI_BLACKLIST);
1649                         if (ret == PATHINFO_OK) {
1650                                 ev_add_path(pp, vecs);
1651                                 pp->tick = 1;
1652                         } else if (ret == PATHINFO_SKIPPED) {
1653                                 put_multipath_config(conf);
1654                                 return -1;
1655                         }
1656                         put_multipath_config(conf);
1657                 }
1658                 return 0;
1659         }
1660         /*
1661          * Async IO in flight. Keep the previous path state
1662          * and reschedule as soon as possible
1663          */
1664         if (newstate == PATH_PENDING) {
1665                 pp->tick = 1;
1666                 return 0;
1667         }
1668         /*
1669          * Synchronize with kernel state
1670          */
1671         if (update_multipath_strings(pp->mpp, vecs->pathvec, 1)) {
1672                 condlog(1, "%s: Could not synchronize with kernel state",
1673                         pp->dev);
1674                 pp->dmstate = PSTATE_UNDEF;
1675         }
1676         /* if update_multipath_strings orphaned the path, quit early */
1677         if (!pp->mpp)
1678                 return 0;
1679
1680         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1681                         check_path_reinstate_state(pp)) {
1682                 pp->state = PATH_DELAYED;
1683                 return 1;
1684         }
1685
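        /*
         * delay_wait_checks: after recovering from a failure, keep the path
         * in PATH_DELAYED for a number of checker passes before trusting it
         * again, as long as the map still has other active paths.
         */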
1686         if ((newstate == PATH_UP || newstate == PATH_GHOST) &&
1687              pp->wait_checks > 0) {
1688                 if (pp->mpp->nr_active > 0) {
1689                         pp->state = PATH_DELAYED;
1690                         pp->wait_checks--;
1691                         return 1;
1692                 } else
1693                         pp->wait_checks = 0;
1694         }
1695
1696         /*
1697          * don't reinstate a failed path if it is in stand-by
1698          * and the target supports only implicit tpgs mode.
1699          * this prevents unnecessary i/o by dm on stand-by
1700          * paths if there are no other active paths in the map.
1701          */
1702         disable_reinstate = (newstate == PATH_GHOST &&
1703                             pp->mpp->nr_active == 0 &&
1704                             pp->tpgs == TPGS_IMPLICIT) ? 1 : 0;
1705
1706         pp->chkrstate = newstate;
1707         if (newstate != pp->state) {
1708                 int oldstate = pp->state;
1709                 pp->state = newstate;
1710
1711                 LOG_MSG(1, checker_message(&pp->checker));
1712
1713                 /*
1714                  * upon state change, reset the checkint
1715                  * to the shortest delay
1716                  */
1717                 conf = get_multipath_config();
1718                 pp->checkint = conf->checkint;
1719                 put_multipath_config(conf);
1720
1721                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY || newstate == PATH_TIMEOUT) {
1722                         /*
1723                          * proactively fail path in the DM
1724                          */
1725                         if (oldstate == PATH_UP ||
1726                             oldstate == PATH_GHOST) {
1727                                 fail_path(pp, 1);
1728                                 if (pp->mpp->delay_wait_checks > 0 &&
1729                                     pp->watch_checks > 0) {
1730                                         pp->wait_checks = pp->mpp->delay_wait_checks;
1731                                         pp->watch_checks = 0;
1732                                 }
1733                         } else
1734                                 fail_path(pp, 0);
1735
1736                         /*
1737                          * cancel scheduled failback
1738                          */
1739                         pp->mpp->failback_tick = 0;
1740
1741                         pp->mpp->stat_path_failures++;
1742                         repair_path(pp);
1743                         return 1;
1744                 }
1745
1746                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1747                         if (pp->mpp && pp->mpp->prflag) {
1748                                 /*
1749                                  * Check Persistent Reservation.
1750                                  */
1751                                 condlog(2, "%s: checking persistent "
1752                                         "reservation registration", pp->dev);
1753                                 mpath_pr_event_handle(pp);
1754                         }
1755                 }
1756
1757                 /*
1758                  * reinstate this path
1759                  */
1760                 if (oldstate != PATH_UP &&
1761                     oldstate != PATH_GHOST) {
1762                         if (pp->mpp->delay_watch_checks > 0)
1763                                 pp->watch_checks = pp->mpp->delay_watch_checks;
1764                         add_active = 1;
1765                 } else {
1766                         if (pp->watch_checks > 0)
1767                                 pp->watch_checks--;
1768                         add_active = 0;
1769                 }
1770                 if (!disable_reinstate && reinstate_path(pp, add_active)) {
1771                         condlog(3, "%s: reload map", pp->dev);
1772                         ev_add_path(pp, vecs);
1773                         pp->tick = 1;
1774                         return 0;
1775                 }
1776                 new_path_up = 1;
1777
1778                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1779                         chkr_new_path_up = 1;
1780
1781                 /*
1782                  * if at least one path is up in a group, and
1783                  * the group is disabled, re-enable it
1784                  */
1785                 if (newstate == PATH_UP)
1786                         enable_group(pp);
1787         }
1788         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1789                 if ((pp->dmstate == PSTATE_FAILED ||
1790                     pp->dmstate == PSTATE_UNDEF) &&
1791                     !disable_reinstate) {
1792                         /* Clear IO errors */
1793                         if (reinstate_path(pp, 0)) {
1794                                 condlog(3, "%s: reload map", pp->dev);
1795                                 ev_add_path(pp, vecs);
1796                                 pp->tick = 1;
1797                                 return 0;
1798                         }
1799                 } else {
1800                         unsigned int max_checkint;
1801                         LOG_MSG(4, checker_message(&pp->checker));
1802                         conf = get_multipath_config();
1803                         max_checkint = conf->max_checkint;
1804                         put_multipath_config(conf);
1805                         if (pp->checkint != max_checkint) {
1806                                 /*
1807                                  * double the next check delay.
1808                                  * max at conf->max_checkint
1809                                  */
1810                                 if (pp->checkint < (max_checkint / 2))
1811                                         pp->checkint = 2 * pp->checkint;
1812                                 else
1813                                         pp->checkint = max_checkint;
1814
1815                                 condlog(4, "%s: delay next check %is",
1816                                         pp->dev_t, pp->checkint);
1817                         }
1818                         if (pp->watch_checks > 0)
1819                                 pp->watch_checks--;
1820                         pp->tick = pp->checkint;
1821                 }
1822         }
1823         else if (newstate == PATH_DOWN) {
1824                 int log_checker_err;
1825
1826                 conf = get_multipath_config();
1827                 log_checker_err = conf->log_checker_err;
1828                 put_multipath_config(conf);
1829                 if (log_checker_err == LOG_CHKR_ERR_ONCE)
1830                         LOG_MSG(3, checker_message(&pp->checker));
1831                 else
1832                         LOG_MSG(2, checker_message(&pp->checker));
1833         }
1834
1835         pp->state = newstate;
1836         repair_path(pp);
1837
1838         if (pp->mpp->wait_for_udev)
1839                 return 1;
1840         /*
1841          * path prio refreshing
1842          */
1843         condlog(4, "path prio refresh");
1844
1845         if (update_prio(pp, new_path_up) &&
1846             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1847              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1848                 update_path_groups(pp->mpp, vecs, !new_path_up);
1849         else if (need_switch_pathgroup(pp->mpp, 0)) {
1850                 if (pp->mpp->pgfailback > 0 &&
1851                     (new_path_up || pp->mpp->failback_tick <= 0))
1852                         pp->mpp->failback_tick =
1853                                 pp->mpp->pgfailback + 1;
1854                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1855                          (chkr_new_path_up && followover_should_failback(pp)))
1856                         switch_pathgroup(pp->mpp);
1857         }
1858         return 1;
1859 }
1860
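/*
 * Reset every path's polling interval to the configured base checkint
 * before the checker loop starts.
 */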
1861 static void init_path_check_interval(struct vectors *vecs)
1862 {
1863         struct config *conf;
1864         struct path *pp;
1865         unsigned int i;
1866
1867         vector_foreach_slot (vecs->pathvec, pp, i) {
1868                 conf = get_multipath_config();
1869                 pp->checkint = conf->checkint;
1870                 put_multipath_config(conf);
1871         }
1872 }
1873
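/*
 * Path checker thread.  Roughly once per second (or on a strict timer when
 * strict_timing is set) it works out how many ticks have elapsed, runs
 * check_path() on every path, services deferred failback, retry and
 * missing-uevent timers, and periodically garbage-collects stale maps.
 */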
1874 static void *
1875 checkerloop (void *ap)
1876 {
1877         struct vectors *vecs;
1878         struct path *pp;
1879         int count = 0;
1880         unsigned int i;
1881         struct itimerval timer_tick_it;
1882         struct timespec last_time;
1883         struct config *conf;
1884
1885         pthread_cleanup_push(rcu_unregister, NULL);
1886         rcu_register_thread();
1887         mlockall(MCL_CURRENT | MCL_FUTURE);
1888         vecs = (struct vectors *)ap;
1889         condlog(2, "path checkers start up");
1890
1891         /* Tweak start time for initial path check */
1892         if (clock_gettime(CLOCK_MONOTONIC, &last_time) != 0)
1893                 last_time.tv_sec = 0;
1894         else
1895                 last_time.tv_sec -= 1;
1896
1897         while (1) {
1898                 struct timespec diff_time, start_time, end_time;
1899                 int num_paths = 0, ticks = 0, signo, strict_timing, rc = 0;
1900                 sigset_t mask;
1901
1902                 if (clock_gettime(CLOCK_MONOTONIC, &start_time) != 0)
1903                         start_time.tv_sec = 0;
1904                 if (start_time.tv_sec && last_time.tv_sec) {
1905                         timespecsub(&start_time, &last_time, &diff_time);
1906                         condlog(4, "tick (%lu.%06lu secs)",
1907                                 diff_time.tv_sec, diff_time.tv_nsec / 1000);
1908                         last_time = start_time;
1909                         ticks = diff_time.tv_sec;
1910                 } else {
1911                         ticks = 1;
1912                         condlog(4, "tick (%d ticks)", ticks);
1913                 }
1914 #ifdef USE_SYSTEMD
1915                 if (use_watchdog)
1916                         sd_notify(0, "WATCHDOG=1");
1917 #endif
1918                 rc = set_config_state(DAEMON_RUNNING);
1919                 if (rc == ETIMEDOUT) {
1920                         condlog(4, "timeout waiting for DAEMON_IDLE");
1921                         continue;
1922                 }
1923
1924                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1925                 lock(&vecs->lock);
1926                 pthread_testcancel();
1927                 vector_foreach_slot (vecs->pathvec, pp, i) {
1928                         rc = check_path(vecs, pp, ticks);
1929                         if (rc < 0) {
1930                                 vector_del_slot(vecs->pathvec, i);
1931                                 free_path(pp);
1932                                 i--;
1933                         } else
1934                                 num_paths += rc;
1935                 }
1936                 lock_cleanup_pop(vecs->lock);
1937
1938                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1939                 lock(&vecs->lock);
1940                 pthread_testcancel();
1941                 defered_failback_tick(vecs->mpvec);
1942                 retry_count_tick(vecs->mpvec);
1943                 missing_uev_wait_tick(vecs);
1944                 lock_cleanup_pop(vecs->lock);
1945
1946                 if (count)
1947                         count--;
1948                 else {
1949                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
1950                         lock(&vecs->lock);
1951                         pthread_testcancel();
1952                         condlog(4, "map garbage collection");
1953                         mpvec_garbage_collector(vecs);
1954                         count = MAPGCINT;
1955                         lock_cleanup_pop(vecs->lock);
1956                 }
1957
1958                 diff_time.tv_nsec = 0;
1959                 if (start_time.tv_sec &&
1960                     clock_gettime(CLOCK_MONOTONIC, &end_time) == 0) {
1961                         timespecsub(&end_time, &start_time, &diff_time);
1962                         if (num_paths) {
1963                                 unsigned int max_checkint;
1964
1965                                 condlog(3, "checked %d path%s in %lu.%06lu secs",
1966                                         num_paths, num_paths > 1 ? "s" : "",
1967                                         diff_time.tv_sec,
1968                                         diff_time.tv_nsec / 1000);
1969                                 conf = get_multipath_config();
1970                                 max_checkint = conf->max_checkint;
1971                                 put_multipath_config(conf);
1972                                 if (diff_time.tv_sec > max_checkint)
1973                                         condlog(1, "path checkers took longer "
1974                                                 "than %lu seconds, consider "
1975                                                 "increasing max_polling_interval",
1976                                                 diff_time.tv_sec);
1977                         }
1978                 }
1979
1980                 post_config_state(DAEMON_IDLE);
1981                 conf = get_multipath_config();
1982                 strict_timing = conf->strict_timing;
1983                 put_multipath_config(conf);
1984                 if (!strict_timing)
1985                         sleep(1);
1986                 else {
1987                         timer_tick_it.it_interval.tv_sec = 0;
1988                         timer_tick_it.it_interval.tv_usec = 0;
1989                         if (diff_time.tv_nsec) {
1990                                 timer_tick_it.it_value.tv_sec = 0;
1991                                 timer_tick_it.it_value.tv_usec =
1992                                      (1000UL * 1000 * 1000 - diff_time.tv_nsec) / 1000;
1993                         } else {
1994                                 timer_tick_it.it_value.tv_sec = 1;
1995                                 timer_tick_it.it_value.tv_usec = 0;
1996                         }
1997                         setitimer(ITIMER_REAL, &timer_tick_it, NULL);
1998
1999                         sigemptyset(&mask);
2000                         sigaddset(&mask, SIGALRM);
2001                         condlog(3, "waiting for %lu.%06lu secs",
2002                                 timer_tick_it.it_value.tv_sec,
2003                                 timer_tick_it.it_value.tv_usec);
2004                         rc = sigwait(&mask, &signo);
2005                         if (rc != 0) {
2006                                 condlog(3, "sigwait failed with error %d", rc);
2007                                 conf = get_multipath_config();
2008                                 conf->strict_timing = 0;
2009                                 put_multipath_config(conf);
2010                                 break;
2011                         }
2012                 }
2013         }
2014         pthread_cleanup_pop(1);
2015         return NULL;
2016 }
2017
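/*
 * (Re)build the complete multipath state: discover paths from sysfs and
 * maps from device-mapper, drop blacklisted paths, coalesce paths into
 * maps and push changed maps into the kernel, then start dm event waiter
 * threads for the resulting maps when start_waiters is set.
 */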
2018 int
2019 configure (struct vectors * vecs, int start_waiters)
2020 {
2021         struct multipath * mpp;
2022         struct path * pp;
2023         vector mpvec;
2024         int i, ret;
2025         struct config *conf;
2026
2027         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc())) {
2028                 condlog(0, "couldn't allocate path vec in configure");
2029                 return 1;
2030         }
2031
2032         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc())) {
2033                 condlog(0, "couldn't allocate multipath vec in configure");
2034                 return 1;
2035         }
2036
2037         if (!(mpvec = vector_alloc())) {
2038                 condlog(0, "couldn't allocate new maps vec in configure");
2039                 return 1;
2040         }
2041
2042         /*
2043          * probe for current path (from sysfs) and map (from dm) sets
2044          */
2045         ret = path_discovery(vecs->pathvec, DI_ALL);
2046         if (ret < 0) {
2047                 condlog(0, "configure failed at path discovery");
2048                 return 1;
2049         }
2050
2051         vector_foreach_slot (vecs->pathvec, pp, i){
2052                 conf = get_multipath_config();
2053                 if (filter_path(conf, pp) > 0){
2054                         vector_del_slot(vecs->pathvec, i);
2055                         free_path(pp);
2056                         i--;
2057                 }
2058                 else
2059                         pp->checkint = conf->checkint;
2060                 put_multipath_config(conf);
2061         }
2062         if (map_discovery(vecs)) {
2063                 condlog(0, "configure failed at map discovery");
2064                 return 1;
2065         }
2066
2067         /*
2068          * create new set of maps & push changed ones into dm
2069          */
2070         if (coalesce_paths(vecs, mpvec, NULL, 1, CMD_NONE)) {
2071                 condlog(0, "configure failed while coalescing paths");
2072                 return 1;
2073         }
2074
2075         /*
2076          * may need to remove some maps which are no longer relevant
2077          * e.g., due to blacklist changes in conf file
2078          */
2079         if (coalesce_maps(vecs, mpvec)) {
2080                 condlog(0, "configure failed while coalescing maps");
2081                 return 1;
2082         }
2083
2084         dm_lib_release();
2085
2086         sync_maps_state(mpvec);
2087         vector_foreach_slot(mpvec, mpp, i){
2088                 remember_wwid(mpp->wwid);
2089                 update_map_pr(mpp);
2090         }
2091
2092         /*
2093          * purge dm of old maps
2094          */
2095         remove_maps(vecs);
2096
2097         /*
2098          * save new set of maps formed by considering current path state
2099          */
2100         vector_free(vecs->mpvec);
2101         vecs->mpvec = mpvec;
2102
2103         /*
2104          * start dm event waiter threads for these new maps
2105          */
2106         vector_foreach_slot(vecs->mpvec, mpp, i) {
2107                 if (setup_multipath(vecs, mpp)) {
2108                         i--;
2109                         continue;
2110                 }
2111                 if (start_waiters) {
2112                         if (start_waiter_thread(mpp, vecs)) {
2113                                 remove_map(mpp, vecs, 1);
2114                                 i--;
2115                         }
2116                 }
2117         }
2118         return 0;
2119 }
2120
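/*
 * Reconfiguration has to be postponed while any existing map is still
 * waiting for udev to settle; the caller flags a delayed_reconfig instead.
 */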
2121 int
2122 need_to_delay_reconfig(struct vectors * vecs)
2123 {
2124         struct multipath *mpp;
2125         int i;
2126
2127         if (!VECTOR_SIZE(vecs->mpvec))
2128                 return 0;
2129
2130         vector_foreach_slot(vecs->mpvec, mpp, i) {
2131                 if (mpp->wait_for_udev)
2132                         return 1;
2133         }
2134         return 0;
2135 }
2136
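/* RCU callback: free the old config once all readers have dropped it. */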
2137 void rcu_free_config(struct rcu_head *head)
2138 {
2139         struct config *conf = container_of(head, struct config, rcu);
2140
2141         free_config(conf);
2142 }
2143
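/*
 * Reload DEFAULT_CONFIGFILE, tear down the current maps and paths (their
 * state refers to the old config), publish the new config via RCU and
 * re-run configure() from scratch.
 */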
2144 int
2145 reconfigure (struct vectors * vecs)
2146 {
2147         struct config * old, *conf;
2148
2149         conf = load_config(DEFAULT_CONFIGFILE);
2150         if (!conf)
2151                 return 1;
2152
2153         /*
2154          * free old map and path vectors ... they use old conf state
2155          */
2156         if (VECTOR_SIZE(vecs->mpvec))
2157                 remove_maps_and_stop_waiters(vecs);
2158
2159         free_pathvec(vecs->pathvec, FREE_PATHS);
2160         vecs->pathvec = NULL;
2161
2162         /* Re-read any timezone changes */
2163         tzset();
2164
2165         dm_drv_version(conf->version, TGT_MPATH);
2166         if (verbosity)
2167                 conf->verbosity = verbosity;
2168         if (bindings_read_only)
2169                 conf->bindings_read_only = bindings_read_only;
2170         if (ignore_new_devs)
2171                 conf->ignore_new_devs = ignore_new_devs;
2172         uxsock_timeout = conf->uxsock_timeout;
2173
2174         old = rcu_dereference(multipath_conf);
2175         rcu_assign_pointer(multipath_conf, conf);
2176         call_rcu(&old->rcu, rcu_free_config);
2177
2178         configure(vecs, 1);
2179
2180
2181         return 0;
2182 }
2183
2184 static struct vectors *
2185 init_vecs (void)
2186 {
2187         struct vectors * vecs;
2188
2189         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
2190
2191         if (!vecs)
2192                 return NULL;
2193
2194         pthread_mutex_init(&vecs->lock.mutex, NULL);
2195
2196         return vecs;
2197 }
2198
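/*
 * sigaction() wrapper mimicking signal(): returns the previous handler,
 * or SIG_ERR on failure.
 */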
2199 static void *
2200 signal_set(int signo, void (*func) (int))
2201 {
2202         int r;
2203         struct sigaction sig;
2204         struct sigaction osig;
2205
2206         sig.sa_handler = func;
2207         sigemptyset(&sig.sa_mask);
2208         sig.sa_flags = 0;
2209
2210         r = sigaction(signo, &sig, &osig);
2211
2212         if (r < 0)
2213                 return (SIG_ERR);
2214         else
2215                 return (osig.sa_handler);
2216 }
2217
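/*
 * Act on the flags set by the asynchronous signal handlers; the actual
 * work (exit, reconfigure, log reset) happens here, outside signal context.
 */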
2218 void
2219 handle_signals(void)
2220 {
2221         if (exit_sig) {
2222                 condlog(2, "exit (signal)");
2223                 exit_daemon();
2224         }
2225         if (reconfig_sig) {
2226                 condlog(2, "reconfigure (signal)");
2227                 set_config_state(DAEMON_CONFIGURE);
2228         }
2229         if (log_reset_sig) {
2230                 condlog(2, "reset log (signal)");
2231                 pthread_mutex_lock(&logq_lock);
2232                 log_reset("multipathd");
2233                 pthread_mutex_unlock(&logq_lock);
2234         }
2235         exit_sig = 0;
2236         reconfig_sig = 0;
2237         log_reset_sig = 0;
2238 }
2239
2240 static void
2241 sighup (int sig)
2242 {
2243         reconfig_sig = 1;
2244 }
2245
2246 static void
2247 sigend (int sig)
2248 {
2249         exit_sig = 1;
2250 }
2251
2252 static void
2253 sigusr1 (int sig)
2254 {
2255         log_reset_sig = 1;
2256 }
2257
2258 static void
2259 sigusr2 (int sig)
2260 {
2261         condlog(3, "SIGUSR2 received");
2262 }
2263
2264 static void
2265 signal_init(void)
2266 {
2267         signal_set(SIGHUP, sighup);
2268         signal_set(SIGUSR1, sigusr1);
2269         signal_set(SIGUSR2, sigusr2);
2270         signal_set(SIGINT, sigend);
2271         signal_set(SIGTERM, sigend);
2272         signal_set(SIGPIPE, sigend);
2273 }
2274
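/*
 * Request SCHED_RR real-time scheduling at priority 99 for the daemon;
 * failure is only logged.
 */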
2275 static void
2276 setscheduler (void)
2277 {
2278         int res;
2279         static struct sched_param sched_param = {
2280                 .sched_priority = 99
2281         };
2282
2283         res = sched_setscheduler (0, SCHED_RR, &sched_param);
2284
2285         if (res == -1)
2286                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
2287         return;
2288 }
2289
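/*
 * Protect the daemon from the OOM killer.  If systemd already set
 * OOMScoreAdjust, leave it alone; otherwise write the minimum score to
 * /proc/self/oom_score_adj, falling back to the legacy /proc/self/oom_adj
 * interface on older kernels.
 */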
2290 static void
2291 set_oom_adj (void)
2292 {
2293 #ifdef OOM_SCORE_ADJ_MIN
2294         int retry = 1;
2295         char *file = "/proc/self/oom_score_adj";
2296         int score = OOM_SCORE_ADJ_MIN;
2297 #else
2298         int retry = 0;
2299         char *file = "/proc/self/oom_adj";
2300         int score = OOM_ADJUST_MIN;
2301 #endif
2302         FILE *fp;
2303         struct stat st;
2304         char *envp;
2305
2306         envp = getenv("OOMScoreAdjust");
2307         if (envp) {
2308                 condlog(3, "Using systemd provided OOMScoreAdjust");
2309                 return;
2310         }
2311         do {
2312                 if (stat(file, &st) == 0){
2313                         fp = fopen(file, "w");
2314                         if (!fp) {
2315                                 condlog(0, "couldn't fopen %s : %s", file,
2316                                         strerror(errno));
2317                                 return;
2318                         }
2319                         fprintf(fp, "%i", score);
2320                         fclose(fp);
2321                         return;
2322                 }
2323                 if (errno != ENOENT) {
2324                         condlog(0, "couldn't stat %s : %s", file,
2325                                 strerror(errno));
2326                         return;
2327                 }
2328 #ifdef OOM_ADJUST_MIN
2329                 file = "/proc/self/oom_adj";
2330                 score = OOM_ADJUST_MIN;
2331 #else
2332                 retry = 0;
2333 #endif
2334         } while (retry--);
2335         condlog(0, "couldn't adjust oom score");
2336 }
2337
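/*
 * Daemon main body, run in the daemonized child (or directly when started
 * in the foreground): set up logging, the pidfile and the initial config,
 * load checkers and prioritizers, adjust resource limits and scheduling,
 * start the uevent listener, CLI listener, checker loop and uevent
 * dispatcher threads, then service reconfigure requests until shutdown
 * and tear everything down again.
 */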
2338 static int
2339 child (void * param)
2340 {
2341         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
2342         pthread_attr_t log_attr, misc_attr, uevent_attr;
2343         struct vectors * vecs;
2344         struct multipath * mpp;
2345         int i;
2346 #ifdef USE_SYSTEMD
2347         unsigned long checkint;
2348 #endif
2349         int rc;
2350         int pid_fd = -1;
2351         struct config *conf;
2352         char *envp;
2353
2354         mlockall(MCL_CURRENT | MCL_FUTURE);
2355         signal_init();
2356         rcu_init();
2357
2358         setup_thread_attr(&misc_attr, 64 * 1024, 0);
2359         setup_thread_attr(&uevent_attr, DEFAULT_UEVENT_STACKSIZE * 1024, 0);
2360         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
2361
2362         if (logsink == 1) {
2363                 setup_thread_attr(&log_attr, 64 * 1024, 0);
2364                 log_thread_start(&log_attr);
2365                 pthread_attr_destroy(&log_attr);
2366         }
2367         pid_fd = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
2368         if (pid_fd < 0) {
2369                 condlog(1, "failed to create pidfile");
2370                 if (logsink == 1)
2371                         log_thread_stop();
2372                 exit(1);
2373         }
2374
2375         post_config_state(DAEMON_START);
2376
2377         condlog(2, "--------start up--------");
2378         condlog(2, "read " DEFAULT_CONFIGFILE);
2379
2380         conf = load_config(DEFAULT_CONFIGFILE);
2381         if (!conf)
2382                 goto failed;
2383
2384         if (verbosity)
2385                 conf->verbosity = verbosity;
2386         if (bindings_read_only)
2387                 conf->bindings_read_only = bindings_read_only;
2388         if (ignore_new_devs)
2389                 conf->ignore_new_devs = ignore_new_devs;
2390         uxsock_timeout = conf->uxsock_timeout;
2391         rcu_assign_pointer(multipath_conf, conf);
2392         dm_init(conf->verbosity);
2393         dm_drv_version(conf->version, TGT_MPATH);
2394         if (init_checkers(conf->multipath_dir)) {
2395                 condlog(0, "failed to initialize checkers");
2396                 goto failed;
2397         }
2398         if (init_prio(conf->multipath_dir)) {
2399                 condlog(0, "failed to initialize prioritizers");
2400                 goto failed;
2401         }
2402
2403         setlogmask(LOG_UPTO(conf->verbosity + 3));
2404
2405         envp = getenv("LimitNOFILE");
2406
2407         if (envp) {
2408                 condlog(2,"Using systemd provided open fds limit of %s", envp);
2409         } else if (conf->max_fds) {
2410                 struct rlimit fd_limit;
2411
2412                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2413                         condlog(0, "can't get open fds limit: %s",
2414                                 strerror(errno));
2415                         fd_limit.rlim_cur = 0;
2416                         fd_limit.rlim_max = 0;
2417                 }
2418                 if (fd_limit.rlim_cur < conf->max_fds) {
2419                         fd_limit.rlim_cur = conf->max_fds;
2420                         if (fd_limit.rlim_max < conf->max_fds)
2421                                 fd_limit.rlim_max = conf->max_fds;
2422                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
2423                                 condlog(0, "can't set open fds limit to "
2424                                         "%lu/%lu : %s",
2425                                         fd_limit.rlim_cur, fd_limit.rlim_max,
2426                                         strerror(errno));
2427                         } else {
2428                                 condlog(3, "set open fds limit to %lu/%lu",
2429                                         fd_limit.rlim_cur, fd_limit.rlim_max);
2430                         }
2431                 }
2432
2433         }
2434
2435         vecs = gvecs = init_vecs();
2436         if (!vecs)
2437                 goto failed;
2438
2439         setscheduler();
2440         set_oom_adj();
2441
2442         dm_udev_set_sync_support(0);
2443 #ifdef USE_SYSTEMD
2444         envp = getenv("WATCHDOG_USEC");
2445         if (envp && sscanf(envp, "%lu", &checkint) == 1) {
2446                 /* Value is in microseconds */
2447                 conf->max_checkint = checkint / 1000000;
2448                 /* Rescale checkint */
2449                 if (conf->checkint > conf->max_checkint)
2450                         conf->checkint = conf->max_checkint;
2451                 else
2452                         conf->checkint = conf->max_checkint / 4;
2453                 condlog(3, "enabling watchdog, interval %d max %d",
2454                         conf->checkint, conf->max_checkint);
2455                 use_watchdog = conf->checkint;
2456         }
2457 #endif
2458         /*
2459          * Startup done, invalidate configuration
2460          */
2461         conf = NULL;
2462
2463         /*
2464          * Signal start of configuration
2465          */
2466         post_config_state(DAEMON_CONFIGURE);
2467
2468         init_path_check_interval(vecs);
2469
2470         /*
2471          * Start uevent listener early to catch events
2472          */
2473         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
2474                 condlog(0, "failed to create uevent thread: %d", rc);
2475                 goto failed;
2476         }
2477         pthread_attr_destroy(&uevent_attr);
2478         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
2479                 condlog(0, "failed to create cli listener: %d", rc);
2480                 goto failed;
2481         }
2482
2483         /*
2484          * start threads
2485          */
2486         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
2487                 condlog(0,"failed to create checker loop thread: %d", rc);
2488                 goto failed;
2489         }
2490         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
2491                 condlog(0, "failed to create uevent dispatcher: %d", rc);
2492                 goto failed;
2493         }
2494         pthread_attr_destroy(&misc_attr);
2495
2496 #ifdef USE_SYSTEMD
2497         sd_notify(0, "READY=1");
2498 #endif
2499
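        /*
         * Main daemon loop: wait on config_cond and service DAEMON_CONFIGURE
         * requests (e.g. triggered by SIGHUP via handle_signals) until a
         * shutdown is requested.
         */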
2500         while (running_state != DAEMON_SHUTDOWN) {
2501                 pthread_cleanup_push(config_cleanup, NULL);
2502                 pthread_mutex_lock(&config_lock);
2503                 if (running_state != DAEMON_CONFIGURE &&
2504                     running_state != DAEMON_SHUTDOWN) {
2505                         pthread_cond_wait(&config_cond, &config_lock);
2506                 }
2507                 pthread_cleanup_pop(1);
2508                 if (running_state == DAEMON_CONFIGURE) {
2509                         pthread_cleanup_push(cleanup_lock, &vecs->lock);
2510                         lock(&vecs->lock);
2511                         pthread_testcancel();
2512                         if (!need_to_delay_reconfig(vecs)) {
2513                                 reconfigure(vecs);
2514                         } else {
2515                                 conf = get_multipath_config();
2516                                 conf->delayed_reconfig = 1;
2517                                 put_multipath_config(conf);
2518                         }
2519                         lock_cleanup_pop(vecs->lock);
2520                         post_config_state(DAEMON_IDLE);
2521                 }
2522         }
2523
2524         lock(&vecs->lock);
2525         conf = get_multipath_config();
2526         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
2527                 vector_foreach_slot(vecs->mpvec, mpp, i)
2528                         dm_queue_if_no_path(mpp->alias, 0);
2529         put_multipath_config(conf);
2530         remove_maps_and_stop_waiters(vecs);
2531         unlock(&vecs->lock);
2532
2533         pthread_cancel(check_thr);
2534         pthread_cancel(uevent_thr);
2535         pthread_cancel(uxlsnr_thr);
2536         pthread_cancel(uevq_thr);
2537
2538         pthread_join(check_thr, NULL);
2539         pthread_join(uevent_thr, NULL);
2540         pthread_join(uxlsnr_thr, NULL);
2541         pthread_join(uevq_thr, NULL);
2542
2543         lock(&vecs->lock);
2544         free_pathvec(vecs->pathvec, FREE_PATHS);
2545         vecs->pathvec = NULL;
2546         unlock(&vecs->lock);
2547
2548         pthread_mutex_destroy(&vecs->lock.mutex);
2549         FREE(vecs);
2550         vecs = NULL;
2551
2552         cleanup_checkers();
2553         cleanup_prio();
2554
2555         dm_lib_release();
2556         dm_lib_exit();
2557
2558         /* We're done here */
2559         condlog(3, "unlink pidfile");
2560         unlink(DEFAULT_PIDFILE);
2561
2562         condlog(2, "--------shut down-------");
2563
2564         if (logsink == 1)
2565                 log_thread_stop();
2566
2567         /*
2568          * Freeing config must be done after condlog() and dm_lib_exit(),
2569          * because logging functions like dlog() and dm_write_log()
2570          * reference the config.
2571          */
2572         conf = rcu_dereference(multipath_conf);
2573         rcu_assign_pointer(multipath_conf, NULL);
2574         call_rcu(&conf->rcu, rcu_free_config);
2575         udev_unref(udev);
2576         udev = NULL;
2577         pthread_attr_destroy(&waiter_attr);
2578 #ifdef _DEBUG_
2579         dbg_free_final(NULL);
2580 #endif
2581
2582 #ifdef USE_SYSTEMD
2583         sd_notify(0, "ERRNO=0");
2584 #endif
2585         exit(0);
2586
2587 failed:
2588 #ifdef USE_SYSTEMD
2589         sd_notify(0, "ERRNO=1");
2590 #endif
2591         if (pid_fd >= 0)
2592                 close(pid_fd);
2593         exit(1);
2594 }
2595
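/*
 * Classic double fork: detach from the controlling terminal and redirect
 * stdio to /dev/null.  Returns the first child's pid in the original
 * parent, 0 in the daemon process, and -1 on error.
 */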
2596 static int
2597 daemonize(void)
2598 {
2599         int pid;
2600         int dev_null_fd;
2601
2602         if( (pid = fork()) < 0){
2603                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
2604                 return -1;
2605         }
2606         else if (pid != 0)
2607                 return pid;
2608
2609         setsid();
2610
2611         if ( (pid = fork()) < 0)
2612                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
2613         else if (pid != 0)
2614                 _exit(0);
2615
2616         if (chdir("/") < 0)
2617                 fprintf(stderr, "cannot chdir to '/', continuing\n");
2618
2619         dev_null_fd = open("/dev/null", O_RDWR);
2620         if (dev_null_fd < 0){
2621                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
2622                         strerror(errno));
2623                 _exit(0);
2624         }
2625
2626         close(STDIN_FILENO);
2627         if (dup(dev_null_fd) < 0) {
2628                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
2629                         strerror(errno));
2630                 _exit(0);
2631         }
2632         close(STDOUT_FILENO);
2633         if (dup(dev_null_fd) < 0) {
2634                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
2635                         strerror(errno));
2636                 _exit(0);
2637         }
2638         close(STDERR_FILENO);
2639         if (dup(dev_null_fd) < 0) {
2640                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
2641                         strerror(errno));
2642                 _exit(0);
2643         }
2644         close(dev_null_fd);
2645         daemon_pid = getpid();
2646         return 0;
2647 }
2648
2649 int
2650 main (int argc, char *argv[])
2651 {
2652         extern char *optarg;
2653         extern int optind;
2654         int arg;
2655         int err;
2656         int foreground = 0;
2657         struct config *conf;
2658
2659         ANNOTATE_BENIGN_RACE_SIZED(&multipath_conf, sizeof(multipath_conf),
2660                                    "Manipulated through RCU");
2661         ANNOTATE_BENIGN_RACE_SIZED(&running_state, sizeof(running_state),
2662                 "Suppress complaints about unprotected running_state reads");
2663         ANNOTATE_BENIGN_RACE_SIZED(&uxsock_timeout, sizeof(uxsock_timeout),
2664                 "Suppress complaints about this scalar variable");
2665
2666         logsink = 1;
2667
2668         if (getuid() != 0) {
2669                 fprintf(stderr, "need to be root\n");
2670                 exit(1);
2671         }
2672
2673         /* make sure we don't lock any path */
2674         if (chdir("/") < 0)
2675                 fprintf(stderr, "can't chdir to root directory : %s\n",
2676                         strerror(errno));
2677         umask(umask(077) | 022);
2678
2679         pthread_cond_init_mono(&config_cond);
2680
2681         udev = udev_new();
2682
2683         while ((arg = getopt(argc, argv, ":dsv:k::Bn")) != EOF ) {
2684                 switch(arg) {
2685                 case 'd':
2686                         foreground = 1;
2687                         if (logsink > 0)
2688                                 logsink = 0;
2689                         //debug=1; /* ### comment me out ### */
2690                         break;
2691                 case 'v':
2692                         if (!isdigit(optarg[0]))
2693                                 exit(1);
2695
2696                         verbosity = atoi(optarg);
2697                         break;
2698                 case 's':
2699                         logsink = -1;
2700                         break;
2701                 case 'k':
2702                         conf = load_config(DEFAULT_CONFIGFILE);
2703                         if (!conf)
2704                                 exit(1);
2705                         if (verbosity)
2706                                 conf->verbosity = verbosity;
2707                         uxsock_timeout = conf->uxsock_timeout;
2708                         uxclnt(optarg, uxsock_timeout + 100);
2709                         free_config(conf);
2710                         exit(0);
2711                 case 'B':
2712                         bindings_read_only = 1;
2713                         break;
2714                 case 'n':
2715                         ignore_new_devs = 1;
2716                         break;
2717                 default:
2718                         fprintf(stderr, "Invalid argument '-%c'\n",
2719                                 optopt);
2720                         exit(1);
2721                 }
2722         }
2723         if (optind < argc) {
2724                 char cmd[CMDSIZE];
2725                 char * s = cmd;
2726                 char * c = s;
2727
2728                 conf = load_config(DEFAULT_CONFIGFILE);
2729                 if (!conf)
2730                         exit(1);
2731                 if (verbosity)
2732                         conf->verbosity = verbosity;
2733                 uxsock_timeout = conf->uxsock_timeout;
2734                 memset(cmd, 0x0, CMDSIZE);
2735                 while (optind < argc) {
2736                         if (strchr(argv[optind], ' '))
2737                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
2738                         else
2739                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
2740                         optind++;
2741                 }
2742                 c += snprintf(c, s + CMDSIZE - c, "\n");
2743                 uxclnt(s, uxsock_timeout + 100);
2744                 free_config(conf);
2745                 exit(0);
2746         }
2747
2748         if (foreground) {
2749                 if (!isatty(fileno(stdout)))
2750                         setbuf(stdout, NULL);
2751                 err = 0;
2752                 daemon_pid = getpid();
2753         } else
2754                 err = daemonize();
2755
2756         if (err < 0)
2757                 /* error */
2758                 exit(1);
2759         else if (err > 0)
2760                 /* parent dies */
2761                 exit(0);
2762         else
2763                 /* child lives */
2764                 return (child(NULL));
2765 }
2766
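/*
 * Persistent reservation handling for a newly usable path: read the
 * registered keys (PR IN, READ KEYS), check whether the map's
 * reservation_key is among them, and if so re-register it on this path
 * with REGISTER AND IGNORE EXISTING KEY.
 */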
2767 void *  mpath_pr_event_handler_fn (void * pathp )
2768 {
2769         struct multipath * mpp;
2770         int i,j, ret, isFound;
2771         struct path * pp = (struct path *)pathp;
2772         unsigned char *keyp;
2773         uint64_t prkey;
2774         struct prout_param_descriptor *param;
2775         struct prin_resp *resp;
2776
2777         mpp = pp->mpp;
2778
2779         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
2780         if (!resp){
2781                 condlog(0,"%s Alloc failed for prin response", pp->dev);
2782                 return NULL;
2783         }
2784
2785         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
2786         if (ret != MPATH_PR_SUCCESS )
2787         {
2788                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
2789                 goto out;
2790         }
2791
2792         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
2793                         resp->prin_descriptor.prin_readkeys.additional_length );
2794
2795         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
2796         {
2797                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
2798                 ret = MPATH_PR_SUCCESS;
2799                 goto out;
2800         }
2801         prkey = 0;
2802         keyp = (unsigned char *)mpp->reservation_key;
2803         for (j = 0; j < 8; ++j) {
2804                 if (j > 0)
2805                         prkey <<= 8;
2806                 prkey |= *keyp;
2807                 ++keyp;
2808         }
2809         condlog(2, "Multipath reservation_key: 0x%" PRIx64 " ", prkey);
2810
2811         isFound = 0;
2812         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
2813         {
2814                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
2815                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
2816                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
2817                 {
2818                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
2819                         isFound = 1;
2820                         break;
2821                 }
2822         }
2823         if (!isFound)
2824         {
2825                 condlog(0, "%s: Either device not registered or ", pp->dev);
2826                 condlog(0, "host is not authorised for registration. Skip path");
2827                 ret = MPATH_PR_OTHER;
2828                 goto out;
2829         }
2830
2831         param = malloc(sizeof(struct prout_param_descriptor));
2832         if (!param)
2833                 goto out;
2834         memset(param, 0, sizeof(struct prout_param_descriptor));
2833
2834         for (j = 7; j >= 0; --j) {
2835                 param->sa_key[j] = (prkey & 0xff);
2836                 prkey >>= 8;
2837         }
2838         param->num_transportid = 0;
2839
2840         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
2841
2842         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
2843         if (ret != MPATH_PR_SUCCESS )
2844         {
2845                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
2846         }
2847         mpp->prflag = 1;
2848
2849         free(param);
2850 out:
2851         free(resp);
2852         return NULL;
2853 }
2854
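/*
 * Run the PR handler in a separate thread but join it immediately, so the
 * call is effectively synchronous for the caller.
 */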
2855 int mpath_pr_event_handle(struct path *pp)
2856 {
2857         pthread_t thread;
2858         int rc;
2859         pthread_attr_t attr;
2860         struct multipath * mpp;
2861
2862         mpp = pp->mpp;
2863
2864         if (!mpp->reservation_key)
2865                 return -1;
2866
2867         pthread_attr_init(&attr);
2868         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2869
2870         rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2871         if (rc) {
2872                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2873                 return -1;
2874         }
2875         pthread_attr_destroy(&attr);
2876         rc = pthread_join(thread, NULL);
2877         return 0;
2878 }