libmultipath: deprecate delay_*_checks
[multipath-tools/.git] / libmultipath / configure.c
1 /*
2  * Copyright (c) 2003, 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Benjamin Marzinski, Redhat
4  * Copyright (c) 2005 Kiyoshi Ueda, NEC
5  * Copyright (c) 2005 Patrick Caulfield, Redhat
6  * Copyright (c) 2005 Edward Goggin, EMC
7  */
8
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <unistd.h>
12 #include <string.h>
13 #include <sys/file.h>
14 #include <errno.h>
15 #include <ctype.h>
16 #include <libdevmapper.h>
17 #include <libudev.h>
18 #include "mpath_cmd.h"
19
20 #include "checkers.h"
21 #include "vector.h"
22 #include "memory.h"
23 #include "devmapper.h"
24 #include "defaults.h"
25 #include "structs.h"
26 #include "structs_vec.h"
27 #include "dmparser.h"
28 #include "config.h"
29 #include "blacklist.h"
30 #include "propsel.h"
31 #include "discovery.h"
32 #include "debug.h"
33 #include "switchgroup.h"
34 #include "dm-generic.h"
35 #include "print.h"
36 #include "configure.h"
37 #include "pgpolicies.h"
38 #include "dict.h"
39 #include "alias.h"
40 #include "prio.h"
41 #include "util.h"
42 #include "uxsock.h"
43 #include "wwids.h"
44 #include "sysfs.h"
45 #include "io_err_stat.h"
46
47 /* Time in ms to wait for pending checkers in setup_map() */
48 #define WAIT_CHECKERS_PENDING_MS 10
49 #define WAIT_ALL_CHECKERS_PENDING_MS 90
50
51 /* group paths in pg by host adapter
52  */
53 int group_by_host_adapter(struct pathgroup *pgp, vector adapters)
54 {
55         struct adapter_group *agp;
56         struct host_group *hgp;
57         struct path *pp, *pp1;
58         char adapter_name1[SLOT_NAME_SIZE];
59         char adapter_name2[SLOT_NAME_SIZE];
60         int i, j;
61         int found_hostgroup = 0;
62
63         while (VECTOR_SIZE(pgp->paths) > 0) {
64
65                 pp = VECTOR_SLOT(pgp->paths, 0);
66
67                 if (sysfs_get_host_adapter_name(pp, adapter_name1))
68                         goto out;
69                 /* create a new host adapter group
70                  */
71                 agp = alloc_adaptergroup();
72                 if (!agp)
73                         goto out;
74                 agp->pgp = pgp;
75
76                 strlcpy(agp->adapter_name, adapter_name1, SLOT_NAME_SIZE);
77                 store_adaptergroup(adapters, agp);
78
79                 /* create a new host port group
80                  */
81                 hgp = alloc_hostgroup();
82                 if (!hgp)
83                         goto out;
84                 if (store_hostgroup(agp->host_groups, hgp))
85                         goto out;
86
87                 hgp->host_no = pp->sg_id.host_no;
88                 agp->num_hosts++;
89                 if (store_path(hgp->paths, pp))
90                         goto out;
91
92                 hgp->num_paths++;
93                 /* delete path from path group
94                  */
95                 vector_del_slot(pgp->paths, 0);
96
97                 /* add all paths belonging to same host adapter
98                  */
99                 vector_foreach_slot(pgp->paths, pp1, i) {
100                         if (sysfs_get_host_adapter_name(pp1, adapter_name2))
101                                 goto out;
102                         if (strcmp(adapter_name1, adapter_name2) == 0) {
103                                 found_hostgroup = 0;
104                                 vector_foreach_slot(agp->host_groups, hgp, j) {
105                                         if (hgp->host_no == pp1->sg_id.host_no) {
106                                                 if (store_path(hgp->paths, pp1))
107                                                         goto out;
108                                                 hgp->num_paths++;
109                                                 found_hostgroup = 1;
110                                                 break;
111                                         }
112                                 }
113                                 if (!found_hostgroup) {
114                                         /* this path belongs to new host port
115                                          * within this adapter
116                                          */
117                                         hgp = alloc_hostgroup();
118                                         if (!hgp)
119                                                 goto out;
120
121                                         if (store_hostgroup(agp->host_groups, hgp))
122                                                 goto out;
123
124                                         agp->num_hosts++;
125                                         if (store_path(hgp->paths, pp1))
126                                                 goto out;
127
128                                         hgp->host_no = pp1->sg_id.host_no;
129                                         hgp->num_paths++;
130                                 }
131                                 /* delete paths from original path_group
132                                  * as they are added into adapter group now
133                                  */
134                                 vector_del_slot(pgp->paths, i);
135                                 i--;
136                         }
137                 }
138         }
139         return 0;
140
141 out:    /* add back paths into pg as re-ordering failed
142          */
143         vector_foreach_slot(adapters, agp, i) {
144                         vector_foreach_slot(agp->host_groups, hgp, j) {
145                                 while (VECTOR_SIZE(hgp->paths) > 0) {
146                                         pp = VECTOR_SLOT(hgp->paths, 0);
147                                         if (store_path(pgp->paths, pp))
148                                                 condlog(3, "failed to restore "
149                                                 "path %s into path group",
150                                                  pp->dev);
151                                         vector_del_slot(hgp->paths, 0);
152                                 }
153                         }
154                 }
155         free_adaptergroup(adapters);
156         return 1;
157 }
158
159 /* re-order paths in pg by alternating adapters and host ports
160  * for optimized selection
161  */
162 int order_paths_in_pg_by_alt_adapters(struct pathgroup *pgp, vector adapters,
163                  int total_paths)
164 {
165         int next_adapter_index = 0;
166         struct adapter_group *agp;
167         struct host_group *hgp;
168         struct path *pp;
169
170         while (total_paths > 0) {
171                 agp = VECTOR_SLOT(adapters, next_adapter_index);
172                 if (!agp) {
173                         condlog(0, "can't get adapter group %d", next_adapter_index);
174                         return 1;
175                 }
176
177                 hgp = VECTOR_SLOT(agp->host_groups, agp->next_host_index);
178                 if (!hgp) {
179                         condlog(0, "can't get host group %d of adapter group %d", agp->next_host_index, next_adapter_index);
180                         return 1;
181                 }
182
183                 if (!hgp->num_paths) {
184                         agp->next_host_index++;
185                         agp->next_host_index %= agp->num_hosts;
186                         next_adapter_index++;
187                         next_adapter_index %= VECTOR_SIZE(adapters);
188                         continue;
189                 }
190
191                 pp  = VECTOR_SLOT(hgp->paths, 0);
192
193                 if (store_path(pgp->paths, pp))
194                         return 1;
195
196                 total_paths--;
197
198                 vector_del_slot(hgp->paths, 0);
199
200                 hgp->num_paths--;
201
202                 agp->next_host_index++;
203                 agp->next_host_index %= agp->num_hosts;
204                 next_adapter_index++;
205                 next_adapter_index %= VECTOR_SIZE(adapters);
206         }
207
208         /* all paths have been added back to the path group
209          * in the crafted alternating order
210          */
211         return 0;
212 }
213
214 /* round-robin: order paths in path group to alternate
215  * between all host adapters
216  */
217 int rr_optimize_path_order(struct pathgroup *pgp)
218 {
219         vector adapters;
220         struct path *pp;
221         int total_paths;
222         int i;
223
224         total_paths = VECTOR_SIZE(pgp->paths);
225         vector_foreach_slot(pgp->paths, pp, i) {
226                 if (pp->sg_id.proto_id != SCSI_PROTOCOL_FCP &&
227                         pp->sg_id.proto_id != SCSI_PROTOCOL_SAS &&
228                         pp->sg_id.proto_id != SCSI_PROTOCOL_ISCSI &&
229                         pp->sg_id.proto_id != SCSI_PROTOCOL_SRP) {
230                         /* return success as default path order
231                          * is maintained in path group
232                          */
233                         return 0;
234                 }
235         }
236         adapters = vector_alloc();
237         if (!adapters)
238                 return 0;
239
240         /* group paths in path group by host adapters
241          */
242         if (group_by_host_adapter(pgp, adapters)) {
243                 /* already freed adapters */
244                 condlog(3, "Failed to group paths by adapters");
245                 return 0;
246         }
247
248         /* re-order paths in pg to alternate between adapters and host ports
249          */
250         if (order_paths_in_pg_by_alt_adapters(pgp, adapters, total_paths)) {
251                 condlog(3, "Failed to re-order paths in pg by adapters "
252                         "and host ports");
253                 free_adaptergroup(adapters);
254                 /* return failure as original paths are
255                  * removed from pgp
256                  */
257                 return 1;
258         }
259
260         free_adaptergroup(adapters);
261         return 0;
262 }
263
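/*
 * Re-check paths that are in PATH_PENDING state, for up to wait_ms
 * milliseconds. Returns 0 as soon as the number of pending paths drops
 * to "goal"; otherwise returns the number of paths still pending.
 */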
264 static int wait_for_pending_paths(struct multipath *mpp,
265                                   struct config *conf,
266                                   int n_pending, int goal, int wait_ms)
267 {
268         static const struct timespec millisec =
269                 { .tv_sec = 0, .tv_nsec = 1000*1000 };
270         int i, j;
271         struct path *pp;
272         struct pathgroup *pgp;
273         struct timespec ts;
274
275         do {
276                 vector_foreach_slot(mpp->pg, pgp, i) {
277                         vector_foreach_slot(pgp->paths, pp, j) {
278                                 if (pp->state != PATH_PENDING)
279                                         continue;
280                                 pp->state = get_state(pp, conf,
281                                                       0, PATH_PENDING);
282                                 if (pp->state != PATH_PENDING &&
283                                     --n_pending <= goal)
284                                         return 0;
285                         }
286                 }
287                 ts = millisec;
288                 while (nanosleep(&ts, &ts) != 0 && errno == EINTR)
289                         /* nothing */;
290         } while (--wait_ms > 0);
291
292         return n_pending;
293 }
294
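/*
 * Select the map properties, group the paths of mpp into path groups and
 * assemble the device-mapper table string into "params".
 * Returns 0 on success, 1 on failure.
 */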
295 int setup_map(struct multipath *mpp, char *params, int params_size,
296               struct vectors *vecs)
297 {
298         struct pathgroup * pgp;
299         struct config *conf;
300         int i, n_paths, marginal_pathgroups;
301
302         /*
303          * don't bother if devmap size is unknown
304          */
305         if (mpp->size <= 0) {
306                 condlog(3, "%s: devmap size is unknown", mpp->alias);
307                 return 1;
308         }
309
310         /*
311          * free features, selector, and hwhandler properties if they are being reused
312          */
313         free_multipath_attributes(mpp);
314         if (mpp->disable_queueing && VECTOR_SIZE(mpp->paths) != 0)
315                 mpp->disable_queueing = 0;
316
317         /*
318          * properties selectors
319          *
320          * Ordering matters for some properties:
321          * - features after no_path_retry and retain_hwhandler
322          * - hwhandler after retain_hwhandler
323          * No guarantee that this list is complete, check code in
324          * propsel.c if in doubt.
325          */
326         conf = get_multipath_config();
327         pthread_cleanup_push(put_multipath_config, conf);
328
329         select_pgfailback(conf, mpp);
330         select_pgpolicy(conf, mpp);
331         select_selector(conf, mpp);
332         select_no_path_retry(conf, mpp);
333         select_retain_hwhandler(conf, mpp);
334         select_features(conf, mpp);
335         select_hwhandler(conf, mpp);
336         select_rr_weight(conf, mpp);
337         select_minio(conf, mpp);
338         select_mode(conf, mpp);
339         select_uid(conf, mpp);
340         select_gid(conf, mpp);
341         select_fast_io_fail(conf, mpp);
342         select_dev_loss(conf, mpp);
343         select_reservation_key(conf, mpp);
344         select_deferred_remove(conf, mpp);
345         select_marginal_path_err_sample_time(conf, mpp);
346         select_marginal_path_err_rate_threshold(conf, mpp);
347         select_marginal_path_err_recheck_gap_time(conf, mpp);
348         select_marginal_path_double_failed_time(conf, mpp);
349         select_san_path_err_threshold(conf, mpp);
350         select_san_path_err_forget_rate(conf, mpp);
351         select_san_path_err_recovery_time(conf, mpp);
352         select_delay_checks(conf, mpp);
353         select_skip_kpartx(conf, mpp);
354         select_max_sectors_kb(conf, mpp);
355         select_ghost_delay(conf, mpp);
356         select_flush_on_last_del(conf, mpp);
357
358         sysfs_set_scsi_tmo(mpp, conf->checkint);
359         marginal_pathgroups = conf->marginal_pathgroups;
360         pthread_cleanup_pop(1);
361
362         if (marginal_path_check_enabled(mpp))
363                 start_io_err_stat_thread(vecs);
364
365         n_paths = VECTOR_SIZE(mpp->paths);
366         /*
367          * assign paths to path groups -- start with no groups and all paths
368          * in mpp->paths
369          */
370         if (mpp->pg) {
371                 vector_foreach_slot (mpp->pg, pgp, i)
372                         free_pathgroup(pgp, KEEP_PATHS);
373
374                 vector_free(mpp->pg);
375                 mpp->pg = NULL;
376         }
377         if (group_paths(mpp, marginal_pathgroups))
378                 return 1;
379
380         /*
381          * If async state detection is used, see if pending state checks
382          * have finished, to get nr_active right. We can't wait until the
383          * checkers time out, as that may take 30s or more, and we are
384          * holding the vecs lock.
385          */
386         if (conf->force_sync == 0 && n_paths > 0) {
387                 int n_pending = pathcount(mpp, PATH_PENDING);
388
389                 if (n_pending > 0)
390                         n_pending = wait_for_pending_paths(
391                                 mpp, conf, n_pending, 0,
392                                 WAIT_CHECKERS_PENDING_MS);
393                 /* ALL paths pending - wait some more, but be satisfied
394                    with only some paths finished */
395                 if (n_pending == n_paths)
396                         n_pending = wait_for_pending_paths(
397                                 mpp, conf, n_pending,
398                                 n_paths >= 4 ? 2 : 1,
399                                 WAIT_ALL_CHECKERS_PENDING_MS);
400                 if (n_pending > 0)
401                         condlog(2, "%s: setting up map with %d/%d path checkers pending",
402                                 mpp->alias, n_pending, n_paths);
403         }
404         mpp->nr_active = pathcount(mpp, PATH_UP) + pathcount(mpp, PATH_GHOST);
405
406         /*
407          * examine each path group and determine the highest priority pg
408          * to switch over to (defaults to the first)
409          */
410         mpp->bestpg = select_path_group(mpp);
411
412         /* re-order paths in all path groups in an optimized way
413          * for round-robin path selectors to get maximum throughput.
414          */
415         if (!strncmp(mpp->selector, "round-robin", 11)) {
416                 vector_foreach_slot(mpp->pg, pgp, i) {
417                         if (VECTOR_SIZE(pgp->paths) <= 2)
418                                 continue;
419                         if (rr_optimize_path_order(pgp)) {
420                                 condlog(2, "cannot re-order paths for "
421                                         "optimization: %s",
422                                         mpp->alias);
423                                 return 1;
424                         }
425                 }
426         }
427
428         /*
429          * transform the mpp->pg vector of vectors of paths
430          * into the params string to feed to the device-mapper
431          */
432         if (assemble_map(mpp, params, params_size)) {
433                 condlog(0, "%s: problem assembling map", mpp->alias);
434                 return 1;
435         }
436         return 0;
437 }
438
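/* Derive a cheap path group identifier by XOR-ing the path pointers. */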
439 static void
440 compute_pgid(struct pathgroup * pgp)
441 {
442         struct path * pp;
443         int i;
444
445         vector_foreach_slot (pgp->paths, pp, i)
446                 pgp->id ^= (long)pp;
447 }
448
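/*
 * Compare the path group layout of mpp with that of the existing map cmpp.
 * Returns 0 if every path group of mpp has an identical counterpart in
 * cmpp, non-zero otherwise.
 */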
449 static int
450 pgcmp (struct multipath * mpp, struct multipath * cmpp)
451 {
452         int i, j;
453         struct pathgroup * pgp;
454         struct pathgroup * cpgp;
455         int r = 0;
456
457         if (!mpp)
458                 return 0;
459
460         vector_foreach_slot (mpp->pg, pgp, i) {
461                 compute_pgid(pgp);
462
463                 vector_foreach_slot (cmpp->pg, cpgp, j) {
464                         if (pgp->id == cpgp->id &&
465                             !pathcmp(pgp, cpgp)) {
466                                 r = 0;
467                                 break;
468                         }
469                         r++;
470                 }
471                 if (r)
472                         return r;
473         }
474         return r;
475 }
476
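/* Look up the udev device of the map itself via its dm major:minor. */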
477 static struct udev_device *
478 get_udev_for_mpp(const struct multipath *mpp)
479 {
480         dev_t devnum;
481         struct udev_device *udd;
482
483         if (!mpp || !mpp->dmi) {
484                 condlog(1, "%s called with empty mpp", __func__);
485                 return NULL;
486         }
487
488         devnum = makedev(mpp->dmi->major, mpp->dmi->minor);
489         udd = udev_device_new_from_devnum(udev, 'b', devnum);
490         if (!udd) {
491                 condlog(1, "failed to get udev device for %s", mpp->alias);
492                 return NULL;
493         }
494         return udd;
495 }
496
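/* Write "change" to the uevent attribute of the map's dm device. */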
497 static void
498 trigger_udev_change(const struct multipath *mpp)
499 {
500         static const char change[] = "change";
501         struct udev_device *udd = get_udev_for_mpp(mpp);
502         if (!udd)
503                 return;
504         condlog(3, "triggering %s uevent for %s", change, mpp->alias);
505         sysfs_attr_set_value(udd, "uevent", change, sizeof(change)-1);
506         udev_device_unref(udd);
507 }
508
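/*
 * Trigger the given uevent action on all "partition" block devices that
 * are children of dev.
 */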
509 static void trigger_partitions_udev_change(struct udev_device *dev,
510                                            const char *action, int len)
511 {
512         struct udev_enumerate *part_enum;
513         struct udev_list_entry *item;
514
515         part_enum = udev_enumerate_new(udev);
516         if (!part_enum)
517                 return;
518
519         if (udev_enumerate_add_match_parent(part_enum, dev) < 0 ||
520             udev_enumerate_add_match_subsystem(part_enum, "block") < 0 ||
521             udev_enumerate_scan_devices(part_enum) < 0)
522                 goto unref;
523
524         udev_list_entry_foreach(item,
525                                 udev_enumerate_get_list_entry(part_enum)) {
526                 const char *syspath;
527                 struct udev_device *part;
528
529                 syspath = udev_list_entry_get_name(item);
530                 part = udev_device_new_from_syspath(udev, syspath);
531                 if (!part)
532                         continue;
533
534                 if (!strcmp("partition", udev_device_get_devtype(part))) {
535                         condlog(4, "%s: triggering %s event for %s", __func__,
536                                 action, syspath);
537                         sysfs_attr_set_value(part, "uevent", action, len);
538                 }
539                 udev_device_unref(part);
540         }
541 unref:
542         udev_enumerate_unref(part_enum);
543 }
544
545 void
546 trigger_paths_udev_change(struct multipath *mpp, bool is_mpath)
547 {
548         struct pathgroup *pgp;
549         struct path *pp;
550         int i, j;
551         /*
552          * If a path changes from multipath to non-multipath, we must
553          * synthesize an artificial "add" event, otherwise the LVM2 rules
554          * (69-lvm2-lvmetad.rules) won't pick it up. Otherwise, we'd just
555          * irritate ourselves with an "add", so use "change".
556          */
557         const char *action = is_mpath ? "change" : "add";
558
559         if (!mpp || !mpp->pg)
560                 return;
561
562         vector_foreach_slot (mpp->pg, pgp, i) {
563                 if (!pgp->paths)
564                         continue;
565                 vector_foreach_slot(pgp->paths, pp, j) {
566                         const char *env;
567
568                         if (!pp->udev)
569                                 continue;
570                         /*
571                          * Paths that are already classified as multipath
572                          * members don't need another uevent.
573                          */
574                         env = udev_device_get_property_value(
575                                 pp->udev, "DM_MULTIPATH_DEVICE_PATH");
576
577                         if (is_mpath && env != NULL && !strcmp(env, "1")) {
578                                 /*
579                                  * If FIND_MULTIPATHS_WAIT_UNTIL is not "0",
580                                  * the path is in "maybe" state and a timer is running;
581                                  * send the uevent now (see multipath.rules).
582                                  */
583                                 env = udev_device_get_property_value(
584                                         pp->udev, "FIND_MULTIPATHS_WAIT_UNTIL");
585                                 if (env == NULL || !strcmp(env, "0"))
586                                         continue;
587                         } else if (!is_mpath &&
588                                    (env == NULL || !strcmp(env, "0")))
589                                 continue;
590
591                         condlog(3, "triggering %s uevent for %s (is %smultipath member)",
592                                 action, pp->dev, is_mpath ? "" : "no ");
593                         sysfs_attr_set_value(pp->udev, "uevent",
594                                              action, strlen(action));
595                         trigger_partitions_udev_change(pp->udev, action,
596                                                        strlen(action));
597                 }
598         }
599
600         mpp->needs_paths_uevent = 0;
601 }
602
603 static int
604 is_mpp_known_to_udev(const struct multipath *mpp)
605 {
606         struct udev_device *udd = get_udev_for_mpp(mpp);
607         int ret = (udd != NULL);
608         udev_device_unref(udd);
609         return ret;
610 }
611
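/*
 * Propagate max_sectors_kb to all path devices of the map. On a reload,
 * the value currently set on the map itself is read back and used instead
 * of the configured one. Returns 0 on success, non-zero on error.
 */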
612 static int
613 sysfs_set_max_sectors_kb(struct multipath *mpp, int is_reload)
614 {
615         struct pathgroup * pgp;
616         struct path *pp;
617         char buff[11];
618         int i, j, ret, err = 0;
619         struct udev_device *udd;
620         int max_sectors_kb;
621
622         if (mpp->max_sectors_kb == MAX_SECTORS_KB_UNDEF)
623                 return 0;
624         max_sectors_kb = mpp->max_sectors_kb;
625         if (is_reload) {
626                 if (!mpp->dmi && dm_get_info(mpp->alias, &mpp->dmi) != 0) {
627                         condlog(1, "failed to get dm info for %s", mpp->alias);
628                         return 1;
629                 }
630                 udd = get_udev_for_mpp(mpp);
631                 if (!udd) {
632                         condlog(1, "failed to get udev device to set max_sectors_kb for %s", mpp->alias);
633                         return 1;
634                 }
635                 ret = sysfs_attr_get_value(udd, "queue/max_sectors_kb", buff,
636                                            sizeof(buff));
637                 udev_device_unref(udd);
638                 if (ret <= 0) {
639                         condlog(1, "failed to get current max_sectors_kb from %s", mpp->alias);
640                         return 1;
641                 }
642                 if (sscanf(buff, "%u\n", &max_sectors_kb) != 1) {
643                         condlog(1, "can't parse current max_sectors_kb from %s",
644                                 mpp->alias);
645                         return 1;
646                 }
647         }
648         snprintf(buff, 11, "%d", max_sectors_kb);
649
650         vector_foreach_slot (mpp->pg, pgp, i) {
651                 vector_foreach_slot(pgp->paths, pp, j) {
652                         ret = sysfs_attr_set_value(pp->udev,
653                                                    "queue/max_sectors_kb",
654                                                    buff, strlen(buff));
655                         if (ret < 0) {
656                                 condlog(1, "failed setting max_sectors_kb on %s : %s", pp->dev, strerror(-ret));
657                                 err = 1;
658                         }
659                 }
660         }
661         return err;
662 }
663
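/*
 * Compare the newly assembled mpp with existing maps (looked up by WWID
 * and by alias) and set mpp->action accordingly: ACT_CREATE,
 * ACT_RENAME/ACT_FORCERENAME, ACT_RELOAD, ACT_RESIZE, ACT_SWITCHPG,
 * ACT_IMPOSSIBLE or ACT_NOTHING.
 */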
664 static void
665 select_action (struct multipath * mpp, vector curmp, int force_reload)
666 {
667         struct multipath * cmpp;
668         struct multipath * cmpp_by_name;
669         char * mpp_feat, * cmpp_feat;
670
671         cmpp = find_mp_by_wwid(curmp, mpp->wwid);
672         cmpp_by_name = find_mp_by_alias(curmp, mpp->alias);
673
674         if (!cmpp_by_name) {
675                 if (cmpp) {
676                         condlog(2, "%s: rename %s to %s", mpp->wwid,
677                                 cmpp->alias, mpp->alias);
678                         strlcpy(mpp->alias_old, cmpp->alias, WWID_SIZE);
679                         mpp->action = ACT_RENAME;
680                         if (force_reload) {
681                                 mpp->force_udev_reload = 1;
682                                 mpp->action = ACT_FORCERENAME;
683                         }
684                         return;
685                 }
686                 mpp->action = ACT_CREATE;
687                 condlog(3, "%s: set ACT_CREATE (map does not exist)",
688                         mpp->alias);
689                 return;
690         }
691
692         if (!cmpp) {
693                 condlog(2, "%s: remove (wwid changed)", mpp->alias);
694                 dm_flush_map(mpp->alias);
695                 strlcpy(cmpp_by_name->wwid, mpp->wwid, WWID_SIZE);
696                 drop_multipath(curmp, cmpp_by_name->wwid, KEEP_PATHS);
697                 mpp->action = ACT_CREATE;
698                 condlog(3, "%s: set ACT_CREATE (map wwid change)",
699                         mpp->alias);
700                 return;
701         }
702
703         if (cmpp != cmpp_by_name) {
704                 condlog(2, "%s: unable to rename %s to %s (%s is used by %s)",
705                         mpp->wwid, cmpp->alias, mpp->alias,
706                         mpp->alias, cmpp_by_name->wwid);
707                 /* reset alias to existing alias */
708                 FREE(mpp->alias);
709                 mpp->alias = STRDUP(cmpp->alias);
710                 mpp->action = ACT_IMPOSSIBLE;
711                 return;
712         }
713
714         if (pathcount(mpp, PATH_UP) == 0) {
715                 mpp->action = ACT_IMPOSSIBLE;
716                 condlog(3, "%s: set ACT_IMPOSSIBLE (no usable path)",
717                         mpp->alias);
718                 return;
719         }
720         if (force_reload) {
721                 mpp->force_udev_reload = 1;
722                 mpp->action = ACT_RELOAD;
723                 condlog(3, "%s: set ACT_RELOAD (forced by user)",
724                         mpp->alias);
725                 return;
726         }
727         if (cmpp->size != mpp->size) {
728                 mpp->force_udev_reload = 1;
729                 mpp->action = ACT_RESIZE;
730                 condlog(3, "%s: set ACT_RESIZE (size change)",
731                         mpp->alias);
732                 return;
733         }
734
735         if (mpp->no_path_retry != NO_PATH_RETRY_UNDEF &&
736             !!strstr(mpp->features, "queue_if_no_path") !=
737             !!strstr(cmpp->features, "queue_if_no_path")) {
738                 mpp->action =  ACT_RELOAD;
739                 condlog(3, "%s: set ACT_RELOAD (no_path_retry change)",
740                         mpp->alias);
741                 return;
742         }
743         if ((mpp->retain_hwhandler != RETAIN_HWHANDLER_ON ||
744              strcmp(cmpp->hwhandler, "0") == 0) &&
745             (strlen(cmpp->hwhandler) != strlen(mpp->hwhandler) ||
746              strncmp(cmpp->hwhandler, mpp->hwhandler,
747                     strlen(mpp->hwhandler)))) {
748                 mpp->action = ACT_RELOAD;
749                 condlog(3, "%s: set ACT_RELOAD (hwhandler change)",
750                         mpp->alias);
751                 return;
752         }
753
754         if (mpp->retain_hwhandler != RETAIN_HWHANDLER_UNDEF &&
755             !!strstr(mpp->features, "retain_attached_hw_handler") !=
756             !!strstr(cmpp->features, "retain_attached_hw_handler") &&
757             get_linux_version_code() < KERNEL_VERSION(4, 3, 0)) {
758                 mpp->action = ACT_RELOAD;
759                 condlog(3, "%s: set ACT_RELOAD (retain_hwhandler change)",
760                         mpp->alias);
761                 return;
762         }
763
764         cmpp_feat = STRDUP(cmpp->features);
765         mpp_feat = STRDUP(mpp->features);
766         if (cmpp_feat && mpp_feat) {
767                 remove_feature(&mpp_feat, "queue_if_no_path");
768                 remove_feature(&mpp_feat, "retain_attached_hw_handler");
769                 remove_feature(&cmpp_feat, "queue_if_no_path");
770                 remove_feature(&cmpp_feat, "retain_attached_hw_handler");
771                 if (strncmp(mpp_feat, cmpp_feat, PARAMS_SIZE)) {
772                         mpp->action =  ACT_RELOAD;
773                         condlog(3, "%s: set ACT_RELOAD (features change)",
774                                 mpp->alias);
775                 }
776         }
777         FREE(cmpp_feat);
778         FREE(mpp_feat);
779
780         if (!cmpp->selector || strncmp(cmpp->selector, mpp->selector,
781                     strlen(mpp->selector))) {
782                 mpp->action = ACT_RELOAD;
783                 condlog(3, "%s: set ACT_RELOAD (selector change)",
784                         mpp->alias);
785                 return;
786         }
787         if (cmpp->minio != mpp->minio) {
788                 mpp->action = ACT_RELOAD;
789                 condlog(3, "%s: set ACT_RELOAD (minio change, %u->%u)",
790                         mpp->alias, cmpp->minio, mpp->minio);
791                 return;
792         }
793         if (!cmpp->pg || VECTOR_SIZE(cmpp->pg) != VECTOR_SIZE(mpp->pg)) {
794                 mpp->action = ACT_RELOAD;
795                 condlog(3, "%s: set ACT_RELOAD (path group number change)",
796                         mpp->alias);
797                 return;
798         }
799         if (pgcmp(mpp, cmpp)) {
800                 mpp->action = ACT_RELOAD;
801                 condlog(3, "%s: set ACT_RELOAD (path group topology change)",
802                         mpp->alias);
803                 return;
804         }
805         if (cmpp->nextpg != mpp->bestpg) {
806                 mpp->action = ACT_SWITCHPG;
807                 condlog(3, "%s: set ACT_SWITCHPG (next path group change)",
808                         mpp->alias);
809                 return;
810         }
811         if (!is_mpp_known_to_udev(cmpp)) {
812                 mpp->action = ACT_RELOAD;
813                 condlog(3, "%s: set ACT_RELOAD (udev device not initialized)",
814                         mpp->alias);
815                 return;
816         }
817         mpp->action = ACT_NOTHING;
818         condlog(3, "%s: set ACT_NOTHING (map unchanged)",
819                 mpp->alias);
820         return;
821 }
822
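/*
 * Ask device-mapper to reinstate paths that are marked failed in the
 * kernel. Paths that are not up are skipped if their path group is
 * currently active or disabled.
 */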
823 int reinstate_paths(struct multipath *mpp)
824 {
825         int i, j;
826         struct pathgroup * pgp;
827         struct path * pp;
828
829         if (!mpp->pg)
830                 return 0;
831
832         vector_foreach_slot (mpp->pg, pgp, i) {
833                 if (!pgp->paths)
834                         continue;
835
836                 vector_foreach_slot (pgp->paths, pp, j) {
837                         if (pp->state != PATH_UP &&
838                             (pgp->status == PGSTATE_DISABLED ||
839                              pgp->status == PGSTATE_ACTIVE))
840                                 continue;
841
842                         if (pp->dmstate == PSTATE_FAILED) {
843                                 if (dm_reinstate_path(mpp->alias, pp->dev_t))
844                                         condlog(0, "%s: error reinstating",
845                                                 pp->dev);
846                         }
847                 }
848         }
849         return 0;
850 }
851
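/*
 * Take (lock != 0) or drop (lock == 0) a shared, non-blocking flock on all
 * path devices of the map. If any lock attempt would block, the locks
 * taken so far are released again and 1 is returned.
 */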
852 static int
853 lock_multipath (struct multipath * mpp, int lock)
854 {
855         struct pathgroup * pgp;
856         struct path * pp;
857         int i, j;
858         int x, y;
859
860         if (!mpp || !mpp->pg)
861                 return 0;
862
863         vector_foreach_slot (mpp->pg, pgp, i) {
864                 if (!pgp->paths)
865                         continue;
866                 vector_foreach_slot(pgp->paths, pp, j) {
867                         if (lock && flock(pp->fd, LOCK_SH | LOCK_NB) &&
868                             errno == EWOULDBLOCK)
869                                 goto fail;
870                         else if (!lock)
871                                 flock(pp->fd, LOCK_UN);
872                 }
873         }
874         return 0;
875 fail:
876         vector_foreach_slot (mpp->pg, pgp, x) {
877                 if (x > i)
878                         return 1;
879                 if (!pgp->paths)
880                         continue;
881                 vector_foreach_slot(pgp->paths, pp, y) {
882                         if (x == i && y >= j)
883                                 return 1;
884                         flock(pp->fd, LOCK_UN);
885                 }
886         }
887         return 1;
888 }
889
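/*
 * Carry out the action selected for the map (create, reload, resize,
 * rename, switch path group, ...) in the device mapper.
 * Returns one of the DOMAP_* codes (DOMAP_OK, DOMAP_EXIST, DOMAP_DRY,
 * DOMAP_RETRY or DOMAP_FAIL).
 */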
890 int domap(struct multipath *mpp, char *params, int is_daemon)
891 {
892         int r = DOMAP_FAIL;
893         struct config *conf;
894         int verbosity;
895
896         /*
897          * last chance to quit before touching the devmaps
898          */
899         if (mpp->action == ACT_DRY_RUN) {
900                 conf = get_multipath_config();
901                 verbosity = conf->verbosity;
902                 put_multipath_config(conf);
903                 print_multipath_topology(mpp, verbosity);
904                 return DOMAP_DRY;
905         }
906
907         if (mpp->action == ACT_CREATE &&
908             dm_map_present(mpp->alias)) {
909                 condlog(3, "%s: map already present", mpp->alias);
910                 mpp->action = ACT_RELOAD;
911         }
912
913         switch (mpp->action) {
914         case ACT_REJECT:
915         case ACT_NOTHING:
916         case ACT_IMPOSSIBLE:
917                 return DOMAP_EXIST;
918
919         case ACT_SWITCHPG:
920                 dm_switchgroup(mpp->alias, mpp->bestpg);
921                 /*
922                  * we may have avoided reinstating paths because they were in an
923                  * active or disabled PG. Now that the topology has changed,
924                  * retry.
925                  */
926                 reinstate_paths(mpp);
927                 return DOMAP_EXIST;
928
929         case ACT_CREATE:
930                 if (lock_multipath(mpp, 1)) {
931                         condlog(3, "%s: failed to create map (in use)",
932                                 mpp->alias);
933                         return DOMAP_RETRY;
934                 }
935
936                 sysfs_set_max_sectors_kb(mpp, 0);
937                 if (is_daemon && mpp->ghost_delay > 0 && mpp->nr_active &&
938                     pathcount(mpp, PATH_GHOST) == mpp->nr_active)
939                         mpp->ghost_delay_tick = mpp->ghost_delay;
940                 r = dm_addmap_create(mpp, params);
941
942                 lock_multipath(mpp, 0);
943                 break;
944
945         case ACT_RELOAD:
946                 sysfs_set_max_sectors_kb(mpp, 1);
947                 if (mpp->ghost_delay_tick > 0 && pathcount(mpp, PATH_UP))
948                         mpp->ghost_delay_tick = 0;
949                 r = dm_addmap_reload(mpp, params, 0);
950                 break;
951
952         case ACT_RESIZE:
953                 sysfs_set_max_sectors_kb(mpp, 1);
954                 if (mpp->ghost_delay_tick > 0 && pathcount(mpp, PATH_UP))
955                         mpp->ghost_delay_tick = 0;
956                 r = dm_addmap_reload(mpp, params, 1);
957                 break;
958
959         case ACT_RENAME:
960                 conf = get_multipath_config();
961                 pthread_cleanup_push(put_multipath_config, conf);
962                 r = dm_rename(mpp->alias_old, mpp->alias,
963                               conf->partition_delim, mpp->skip_kpartx);
964                 pthread_cleanup_pop(1);
965                 break;
966
967         case ACT_FORCERENAME:
968                 conf = get_multipath_config();
969                 pthread_cleanup_push(put_multipath_config, conf);
970                 r = dm_rename(mpp->alias_old, mpp->alias,
971                               conf->partition_delim, mpp->skip_kpartx);
972                 pthread_cleanup_pop(1);
973                 if (r) {
974                         sysfs_set_max_sectors_kb(mpp, 1);
975                         if (mpp->ghost_delay_tick > 0 &&
976                             pathcount(mpp, PATH_UP))
977                                 mpp->ghost_delay_tick = 0;
978                         r = dm_addmap_reload(mpp, params, 0);
979                 }
980                 break;
981
982         default:
983                 break;
984         }
985
986         if (r == DOMAP_OK) {
987                 /*
988                  * DM_DEVICE_CREATE, DM_DEVICE_RENAME, or DM_DEVICE_RELOAD
989                  * succeeded
990                  */
991                 mpp->force_udev_reload = 0;
992                 if (mpp->action == ACT_CREATE &&
993                     (remember_wwid(mpp->wwid) == 1 ||
994                      mpp->needs_paths_uevent))
995                         trigger_paths_udev_change(mpp, true);
996                 if (!is_daemon) {
997                         /* multipath client mode */
998                         dm_switchgroup(mpp->alias, mpp->bestpg);
999                 } else  {
1000                         /* multipath daemon mode */
1001                         mpp->stat_map_loads++;
1002                         condlog(2, "%s: load table [0 %llu %s %s]", mpp->alias,
1003                                 mpp->size, TGT_MPATH, params);
1004                         /*
1005                          * Required action is over, reset for the stateful daemon.
1006                          * But don't do it for creation, as the caller uses
1007                          * mpp->action to figure out whether to start the waitevent checker.
1008                          */
1009                         if (mpp->action != ACT_CREATE)
1010                                 mpp->action = ACT_NOTHING;
1011                         else {
1012                                 conf = get_multipath_config();
1013                                 mpp->wait_for_udev = 1;
1014                                 mpp->uev_wait_tick = conf->uev_wait_timeout;
1015                                 put_multipath_config(conf);
1016                         }
1017                 }
1018                 dm_setgeometry(mpp);
1019                 return DOMAP_OK;
1020         } else if (r == DOMAP_FAIL && mpp->action == ACT_CREATE &&
1021                    mpp->needs_paths_uevent)
1022                 trigger_paths_udev_change(mpp, false);
1023
1024         return DOMAP_FAIL;
1025 }
1026
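/* A map is considered dead if none of its paths has a device name left. */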
1027 static int
1028 deadmap (struct multipath * mpp)
1029 {
1030         int i, j;
1031         struct pathgroup * pgp;
1032         struct path * pp;
1033
1034         if (!mpp->pg)
1035                 return 1;
1036
1037         vector_foreach_slot (mpp->pg, pgp, i) {
1038                 if (!pgp->paths)
1039                         continue;
1040
1041                 vector_foreach_slot (pgp->paths, pp, j)
1042                         if (strlen(pp->dev))
1043                                 return 0; /* alive */
1044         }
1045
1046         return 1; /* dead */
1047 }
1048
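/*
 * Returns 1 if multipathd is reachable over its unix socket and not
 * shutting down, 0 otherwise.
 */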
1049 int check_daemon(void)
1050 {
1051         int fd;
1052         char *reply;
1053         int ret = 0;
1054         unsigned int timeout;
1055         struct config *conf;
1056
1057         fd = mpath_connect();
1058         if (fd == -1)
1059                 return 0;
1060
1061         if (send_packet(fd, "show daemon") != 0)
1062                 goto out;
1063         conf = get_multipath_config();
1064         timeout = conf->uxsock_timeout;
1065         put_multipath_config(conf);
1066         if (recv_packet(fd, &reply, timeout) != 0)
1067                 goto out;
1068
1069         if (reply && strstr(reply, "shutdown"))
1070                 goto out_free;
1071
1072         ret = 1;
1073
1074 out_free:
1075         FREE(reply);
1076 out:
1077         mpath_disconnect(fd);
1078         return ret;
1079 }
1080
1081 /*
1082  * The force_reload parameter determines how coalesce_paths treats existing maps.
1083  * FORCE_RELOAD_NONE: existing maps aren't touched at all
1084  * FORCE_RELOAD_YES: all maps are rebuilt from scratch and (re)loaded in DM
1085  * FORCE_RELOAD_WEAK: existing maps are compared to the current conf and only
1086  * reloaded in DM if there's a difference. This is useful during startup.
1087  */
1088 int coalesce_paths (struct vectors * vecs, vector newmp, char * refwwid,
1089                     int force_reload, enum mpath_cmds cmd)
1090 {
1091         int ret = CP_FAIL;
1092         int k, i, r;
1093         int is_daemon = (cmd == CMD_NONE) ? 1 : 0;
1094         char params[PARAMS_SIZE];
1095         struct multipath * mpp;
1096         struct path * pp1;
1097         struct path * pp2;
1098         vector curmp = vecs->mpvec;
1099         vector pathvec = vecs->pathvec;
1100         struct config *conf;
1101         int allow_queueing;
1102         uint64_t *size_mismatch_seen;
1103
1104         /* ignore refwwid if it's empty */
1105         if (refwwid && !strlen(refwwid))
1106                 refwwid = NULL;
1107
1108         if (force_reload != FORCE_RELOAD_NONE) {
1109                 vector_foreach_slot (pathvec, pp1, k) {
1110                         pp1->mpp = NULL;
1111                 }
1112         }
1113
1114         if (VECTOR_SIZE(pathvec) == 0)
1115                 return CP_OK;
1116         size_mismatch_seen = calloc((VECTOR_SIZE(pathvec) - 1) / 64 + 1,
1117                                     sizeof(uint64_t));
1118         if (size_mismatch_seen == NULL)
1119                 return CP_FAIL;
1120
1121         vector_foreach_slot (pathvec, pp1, k) {
1122                 int invalid;
1123                 /* skip this path for some reason */
1124                 /* reasons to skip this path */
1125                 /* 1. if path has no unique id or wwid blacklisted */
1126                 if (strlen(pp1->wwid) == 0) {
1127                         orphan_path(pp1, "no WWID");
1128                         continue;
1129                 }
1130
1131                 conf = get_multipath_config();
1132                 pthread_cleanup_push(put_multipath_config, conf);
1133                 invalid = (filter_path(conf, pp1) > 0);
1134                 pthread_cleanup_pop(1);
1135                 if (invalid) {
1136                         orphan_path(pp1, "blacklisted");
1137                         continue;
1138                 }
1139
1140                 /* 2. if path already coalesced, or seen and discarded */
1141                 if (pp1->mpp || is_bit_set_in_array(k, size_mismatch_seen))
1142                         continue;
1143
1144                 /* 3. if path has disappeared */
1145                 if (pp1->state == PATH_REMOVED) {
1146                         orphan_path(pp1, "path removed");
1147                         continue;
1148                 }
1149
1150                 /* 4. path is out of scope */
1151                 if (refwwid && strncmp(pp1->wwid, refwwid, WWID_SIZE - 1))
1152                         continue;
1153
1154                 /* If find_multipaths was selected check if the path is valid */
1155                 if (!refwwid && !should_multipath(pp1, pathvec, curmp)) {
1156                         orphan_path(pp1, "only one path");
1157                         continue;
1158                 }
1159
1160                 /*
1161                  * at this point, we know we really got a new mp
1162                  */
1163                 mpp = add_map_with_path(vecs, pp1, 0);
1164                 if (!mpp) {
1165                         orphan_path(pp1, "failed to create multipath device");
1166                         continue;
1167                 }
1168
1169                 if (!mpp->paths) {
1170                         condlog(0, "%s: skip coalesce (no paths)", mpp->alias);
1171                         remove_map(mpp, vecs, 0);
1172                         continue;
1173                 }
1174
1175                 for (i = k + 1; i < VECTOR_SIZE(pathvec); i++) {
1176                         pp2 = VECTOR_SLOT(pathvec, i);
1177
1178                         if (strcmp(pp1->wwid, pp2->wwid))
1179                                 continue;
1180
1181                         if (!mpp->size && pp2->size)
1182                                 mpp->size = pp2->size;
1183
1184                         if (mpp->size && pp2->size &&
1185                             pp2->size != mpp->size) {
1186                                 /*
1187                                  * ouch, avoid feeding that to the DM
1188                                  */
1189                                 condlog(0, "%s: size %llu, expected %llu. "
1190                                         "Discard", pp2->dev, pp2->size,
1191                                         mpp->size);
1192                                 mpp->action = ACT_REJECT;
1193                                 set_bit_in_array(i, size_mismatch_seen);
1194                         }
1195                 }
1196                 verify_paths(mpp, vecs);
1197
1198                 params[0] = '\0';
1199                 if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1200                         remove_map(mpp, vecs, 0);
1201                         continue;
1202                 }
1203
1204                 if (cmd == CMD_DRY_RUN)
1205                         mpp->action = ACT_DRY_RUN;
1206                 if (mpp->action == ACT_UNDEF)
1207                         select_action(mpp, curmp,
1208                                       force_reload == FORCE_RELOAD_YES ? 1 : 0);
1209
1210                 r = domap(mpp, params, is_daemon);
1211
1212                 if (r == DOMAP_FAIL || r == DOMAP_RETRY) {
1213                         condlog(3, "%s: domap (%u) failure "
1214                                    "for create/reload map",
1215                                 mpp->alias, r);
1216                         if (r == DOMAP_FAIL || is_daemon) {
1217                                 condlog(2, "%s: %s map",
1218                                         mpp->alias, (mpp->action == ACT_CREATE)?
1219                                         "ignoring" : "removing");
1220                                 remove_map(mpp, vecs, 0);
1221                                 continue;
1222                         } else /* if (r == DOMAP_RETRY && !is_daemon) */ {
1223                                 ret = CP_RETRY;
1224                                 goto out;
1225                         }
1226                 }
1227                 if (r == DOMAP_DRY)
1228                         continue;
1229
1230                 if (r == DOMAP_EXIST && mpp->action == ACT_NOTHING &&
1231                     force_reload == FORCE_RELOAD_WEAK)
1232                         /*
1233                          * First time we're called, and no changes applied.
1234                          * domap() was a noop. But we can't be sure that
1235                          * udev has already finished setting up this device
1236                          * (udev in initrd may have been shut down while
1237                          * processing this device or its children).
1238                          * Trigger a change event, just in case.
1239                          */
1240                         trigger_udev_change(find_mp_by_wwid(curmp, mpp->wwid));
1241
1242                 conf = get_multipath_config();
1243                 allow_queueing = conf->allow_queueing;
1244                 put_multipath_config(conf);
1245                 if (!is_daemon && !allow_queueing && !check_daemon()) {
1246                         if (mpp->no_path_retry != NO_PATH_RETRY_UNDEF &&
1247                             mpp->no_path_retry != NO_PATH_RETRY_FAIL)
1248                                 condlog(3, "%s: multipathd not running, unset "
1249                                         "queue_if_no_path feature", mpp->alias);
1250                         if (!dm_queue_if_no_path(mpp->alias, 0))
1251                                 remove_feature(&mpp->features,
1252                                                "queue_if_no_path");
1253                 }
1254
1255                 if (!is_daemon && mpp->action != ACT_NOTHING) {
1256                         int verbosity;
1257
1258                         conf = get_multipath_config();
1259                         verbosity = conf->verbosity;
1260                         put_multipath_config(conf);
1261                         print_multipath_topology(mpp, verbosity);
1262                 }
1263
1264                 if (newmp) {
1265                         if (mpp->action != ACT_REJECT) {
1266                                 if (!vector_alloc_slot(newmp))
1267                                         goto out;
1268                                 vector_set_slot(newmp, mpp);
1269                         }
1270                         else
1271                                 remove_map(mpp, vecs, 0);
1272                 }
1273         }
1274         /*
1275          * Flush maps with only dead paths (ie not in sysfs)
1276          * Keep maps with only failed paths
1277          */
1278         if (newmp) {
1279                 vector_foreach_slot (newmp, mpp, i) {
1280                         char alias[WWID_SIZE];
1281
1282                         if (!deadmap(mpp))
1283                                 continue;
1284
1285                         strlcpy(alias, mpp->alias, WWID_SIZE);
1286
1287                         vector_del_slot(newmp, i);
1288                         i--;
1289                         remove_map(mpp, vecs, 0);
1290
1291                         if (dm_flush_map(alias))
1292                                 condlog(2, "%s: remove failed (dead)",
1293                                         alias);
1294                         else
1295                                 condlog(2, "%s: remove (dead)", alias);
1296                 }
1297         }
1298         ret = CP_OK;
1299 out:
1300         free(size_mismatch_seen);
1301         return ret;
1302 }
1303
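/*
 * Resolve "dev" to a udev block device, according to dev_type: a device
 * node or map name, a major:minor string, or the calling uevent
 * environment. Returns NULL on failure.
 */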
1304 struct udev_device *get_udev_device(const char *dev, enum devtypes dev_type)
1305 {
1306         struct udev_device *ud = NULL;
1307         const char *base;
1308
1309         if (dev == NULL || *dev == '\0')
1310                 return NULL;
1311
1312         switch (dev_type) {
1313         case DEV_DEVNODE:
1314         case DEV_DEVMAP:
1315                 /* This should be GNU basename, compiler will warn if not */
1316                 base = basename(dev);
1317                 if (*base == '\0')
1318                         break;
1319                 ud = udev_device_new_from_subsystem_sysname(udev, "block",
1320                                                             base);
1321                 break;
1322         case DEV_DEVT:
1323                 ud = udev_device_new_from_devnum(udev, 'b', parse_devt(dev));
1324                 break;
1325         case DEV_UEVENT:
1326                 ud = udev_device_new_from_environment(udev);
1327                 break;
1328         default:
1329                 condlog(0, "Internal error: get_udev_device called with invalid type %d\n",
1330                         dev_type);
1331                 break;
1332         }
1333         if (ud == NULL)
1334                 condlog(2, "get_udev_device: failed to look up %s with type %d",
1335                         dev, dev_type);
1336         return ud;
1337 }
1338
1339 /*
1340  * returns:
1341  * 0 - success
1342  * 1 - failure
1343  * 2 - blacklist
1344  */
1345 int get_refwwid(enum mpath_cmds cmd, char *dev, enum devtypes dev_type,
1346                 vector pathvec, char **wwid)
1347 {
1348         int ret = 1;
1349         struct path * pp;
1350         char buff[FILE_NAME_SIZE];
1351         char * refwwid = NULL, tmpwwid[WWID_SIZE];
1352         int flags = DI_SYSFS | DI_WWID;
1353         struct config *conf;
1354         int invalid = 0;
1355
1356         if (!wwid)
1357                 return 1;
1358         *wwid = NULL;
1359
1360         if (dev_type == DEV_NONE)
1361                 return 1;
1362
1363         if (cmd != CMD_REMOVE_WWID)
1364                 flags |= DI_BLACKLIST;
1365
1366         if (dev_type == DEV_DEVNODE) {
1367                 if (basenamecpy(dev, buff, FILE_NAME_SIZE) == 0) {
1368                         condlog(1, "basename failed for '%s' (%s)",
1369                                 dev, buff);
1370                         return 1;
1371                 }
1372
1373                 pp = find_path_by_dev(pathvec, buff);
1374                 if (!pp) {
1375                         struct udev_device *udevice =
1376                                 get_udev_device(buff, dev_type);
1377
1378                         if (!udevice)
1379                                 return 1;
1380
1381                         conf = get_multipath_config();
1382                         pthread_cleanup_push(put_multipath_config, conf);
1383                         ret = store_pathinfo(pathvec, conf, udevice,
1384                                              flags, &pp);
1385                         pthread_cleanup_pop(1);
1386                         udev_device_unref(udevice);
1387                         if (!pp) {
1388                                 if (ret == 1)
1389                                         condlog(0, "%s: can't store path info",
1390                                                 dev);
1391                                 return ret;
1392                         }
1393                 }
1394                 conf = get_multipath_config();
1395                 pthread_cleanup_push(put_multipath_config, conf);
1396                 if (pp->udev && pp->uid_attribute &&
1397                     filter_property(conf, pp->udev, 3, pp->uid_attribute) > 0)
1398                         invalid = 1;
1399                 pthread_cleanup_pop(1);
1400                 if (invalid)
1401                         return 2;
1402
1403                 refwwid = pp->wwid;
1404                 goto out;
1405         }
1406
1407         if (dev_type == DEV_DEVT) {
1408                 strchop(dev);
1409                 if (devt2devname(buff, FILE_NAME_SIZE, dev)) {
1410                         condlog(0, "%s: cannot find block device\n", dev);
1411                         return 1;
1412                 }
1413                 pp = find_path_by_dev(pathvec, buff);
1414                 if (!pp) {
1415                         struct udev_device *udevice =
1416                                 get_udev_device(dev, dev_type);
1417
1418                         if (!udevice)
1419                                 return 1;
1420
1421                         conf = get_multipath_config();
1422                         pthread_cleanup_push(put_multipath_config, conf);
1423                         ret = store_pathinfo(pathvec, conf, udevice,
1424                                              flags, &pp);
1425                         pthread_cleanup_pop(1);
1426                         udev_device_unref(udevice);
1427                         if (!pp) {
1428                                 if (ret == 1)
1429                                         condlog(0, "%s can't store path info",
1430                                                 buff);
1431                                 return ret;
1432                         }
1433                 }
1434                 conf = get_multipath_config();
1435                 pthread_cleanup_push(put_multipath_config, conf);
1436                 if (pp->udev && pp->uid_attribute &&
1437                     filter_property(conf, pp->udev, 3, pp->uid_attribute) > 0)
1438                         invalid = 1;
1439                 pthread_cleanup_pop(1);
1440                 if (invalid)
1441                         return 2;
1442                 refwwid = pp->wwid;
1443                 goto out;
1444         }
1445
1446         if (dev_type == DEV_UEVENT) {
1447                 struct udev_device *udevice = get_udev_device(dev, dev_type);
1448
1449                 if (!udevice)
1450                         return 1;
1451
1452                 conf = get_multipath_config();
1453                 pthread_cleanup_push(put_multipath_config, conf);
1454                 ret = store_pathinfo(pathvec, conf, udevice,
1455                                      flags, &pp);
1456                 pthread_cleanup_pop(1);
1457                 udev_device_unref(udevice);
1458                 if (!pp) {
1459                         if (ret == 1)
1460                                 condlog(0, "%s: can't store path info", dev);
1461                         return ret;
1462                 }
1463                 conf = get_multipath_config();
1464                 pthread_cleanup_push(put_multipath_config, conf);
1465                 if (pp->udev && pp->uid_attribute &&
1466                     filter_property(conf, pp->udev, 3, pp->uid_attribute) > 0)
1467                         invalid = 1;
1468                 pthread_cleanup_pop(1);
1469                 if (invalid)
1470                         return 2;
1471                 refwwid = pp->wwid;
1472                 goto out;
1473         }
1474
1475         if (dev_type == DEV_DEVMAP) {
1476
1477                 conf = get_multipath_config();
1478                 pthread_cleanup_push(put_multipath_config, conf);
1479                 if (((dm_get_uuid(dev, tmpwwid, WWID_SIZE)) == 0)
1480                     && (strlen(tmpwwid))) {
1481                         refwwid = tmpwwid;
1482                         goto check;
1483                 }
1484
1485                 /*
1486                  * may be a binding
1487                  */
1488                 if (get_user_friendly_wwid(dev, tmpwwid,
1489                                            conf->bindings_file) == 0) {
1490                         refwwid = tmpwwid;
1491                         goto check;
1492                 }
1493
1494                 /*
1495                  * or may be an alias
1496                  */
1497                 refwwid = get_mpe_wwid(conf->mptable, dev);
1498
1499                 /*
1500                  * or directly a wwid
1501                  */
1502                 if (!refwwid)
1503                         refwwid = dev;
1504
1505 check:
1506                 if (refwwid && strlen(refwwid) &&
1507                     filter_wwid(conf->blist_wwid, conf->elist_wwid, refwwid,
1508                                 NULL) > 0)
1509                         invalid = 1;
1510                 pthread_cleanup_pop(1);
1511                 if (invalid)
1512                         return 2;
1513         }
1514 out:
1515         if (refwwid && strlen(refwwid)) {
1516                 *wwid = STRDUP(refwwid);
1517                 return 0;
1518         }
1519
1520         return 1;
1521 }
1522
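/*
 * Rebuild the table of an existing map and reload it in the device mapper.
 * If "refresh" is set, path priorities are re-read first.
 * Returns 0 on success, 1 on failure.
 */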
1523 int reload_map(struct vectors *vecs, struct multipath *mpp, int refresh,
1524                int is_daemon)
1525 {
1526         char params[PARAMS_SIZE] = {0};
1527         struct path *pp;
1528         int i, r;
1529
1530         update_mpp_paths(mpp, vecs->pathvec);
1531         if (refresh) {
1532                 vector_foreach_slot (mpp->paths, pp, i) {
1533                         struct config *conf = get_multipath_config();
1534                         pthread_cleanup_push(put_multipath_config, conf);
1535                         r = pathinfo(pp, conf, DI_PRIO);
1536                         pthread_cleanup_pop(1);
1537                         if (r) {
1538                                 condlog(2, "%s: failed to refresh pathinfo",
1539                                         mpp->alias);
1540                                 return 1;
1541                         }
1542                 }
1543         }
1544         if (setup_map(mpp, params, PARAMS_SIZE, vecs)) {
1545                 condlog(0, "%s: failed to setup map", mpp->alias);
1546                 return 1;
1547         }
1548         select_action(mpp, vecs->mpvec, 1);
1549
1550         r = domap(mpp, params, is_daemon);
1551         if (r == DOMAP_FAIL || r == DOMAP_RETRY) {
1552                 condlog(3, "%s: domap (%u) failure "
1553                         "for reload map", mpp->alias, r);
1554                 return 1;
1555         }
1556
1557         return 0;
1558 }