1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <mpath_persist.h>
21
22 /*
23  * libcheckers
24  */
25 #include <checkers.h>
26
27 /*
28  * libmultipath
29  */
30 #include <parser.h>
31 #include <vector.h>
32 #include <memory.h>
33 #include <config.h>
34 #include <util.h>
35 #include <hwtable.h>
36 #include <defaults.h>
37 #include <structs.h>
38 #include <blacklist.h>
39 #include <structs_vec.h>
40 #include <dmparser.h>
41 #include <devmapper.h>
42 #include <sysfs.h>
43 #include <dict.h>
44 #include <discovery.h>
45 #include <debug.h>
46 #include <propsel.h>
47 #include <uevent.h>
48 #include <switchgroup.h>
49 #include <print.h>
50 #include <configure.h>
51 #include <prio.h>
52 #include <pgpolicies.h>
54
55 #include "main.h"
56 #include "pidfile.h"
57 #include "uxlsnr.h"
58 #include "uxclnt.h"
59 #include "cli.h"
60 #include "cli_handlers.h"
61 #include "lock.h"
62 #include "waiter.h"
63
64 #define FILE_NAME_SIZE 256
65 #define CMDSIZE 160
66
67 #define LOG_MSG(a, b) \
68 do { \
69         if (pp->offline) \
70                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
71         else if (strlen(b)) \
72                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
73 } while(0)
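
/*
 * Usage sketch: with pp pointing at a checked path (pp->mpp valid),
 *
 *        LOG_MSG(1, checker_message(&pp->checker));
 *
 * logs "<alias>: <dev> - <checker message>" at priority 1, or the
 * "path offline" variant when pp->offline is set.
 */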
74
75 struct mpath_event_param
76 {
77         char * devname;
78         struct multipath *mpp;
79 };
80
81 unsigned int mpath_mx_alloc_len;
82
83 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
84 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85
86 int logsink;
87 enum daemon_status running_state;
88 pid_t daemon_pid;
89
90 /*
91  * global copy of vecs for use in sig handlers
92  */
93 struct vectors * gvecs;
94
95 static int
96 need_switch_pathgroup (struct multipath * mpp, int refresh)
97 {
98         struct pathgroup * pgp;
99         struct path * pp;
100         unsigned int i, j;
101
102         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
103                 return 0;
104
105         /*
106          * Refresh path priority values
107          */
108         if (refresh)
109                 vector_foreach_slot (mpp->pg, pgp, i)
110                         vector_foreach_slot (pgp->paths, pp, j)
111                                 pathinfo(pp, conf->hwtable, DI_PRIO);
112
113         mpp->bestpg = select_path_group(mpp);
114
115         if (mpp->bestpg != mpp->nextpg)
116                 return 1;
117
118         return 0;
119 }
120
121 static void
122 switch_pathgroup (struct multipath * mpp)
123 {
124         mpp->stat_switchgroup++;
125         dm_switchgroup(mpp->alias, mpp->bestpg);
126         condlog(2, "%s: switch to path group #%i",
127                  mpp->alias, mpp->bestpg);
128 }
129
130 static int
131 coalesce_maps(struct vectors *vecs, vector nmpv)
132 {
133         struct multipath * ompp;
134         vector ompv = vecs->mpvec;
135         unsigned int i;
136         int j;
137
138         vector_foreach_slot (ompv, ompp, i) {
139                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
140                         /*
141                          * remove all current maps not allowed by the
142                          * current configuration
143                          */
144                         if (dm_flush_map(ompp->alias)) {
145                                 condlog(0, "%s: unable to flush devmap",
146                                         ompp->alias);
147                                 /*
148                                  * may be just because the device is open
149                                  */
150                                 if (!vector_alloc_slot(nmpv))
151                                         return 1;
152
153                                 vector_set_slot(nmpv, ompp);
154                                 setup_multipath(vecs, ompp);
155
156                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
157                                         vector_del_slot(ompv, j);
158
159                                 continue;
160                         }
161                         else {
162                                 dm_lib_release();
163                                 condlog(2, "%s devmap removed", ompp->alias);
164                         }
165                 } else if (conf->reassign_maps) {
166                         condlog(3, "%s: Reassign existing device-mapper"
167                                 " devices", ompp->alias);
168                         dm_reassign(ompp->alias);
169                 }
170         }
171         return 0;
172 }
173
174 void
175 sync_map_state(struct multipath *mpp)
176 {
177         struct pathgroup *pgp;
178         struct path *pp;
179         unsigned int i, j;
180
181         if (!mpp->pg)
182                 return;
183
184         vector_foreach_slot (mpp->pg, pgp, i){
185                 vector_foreach_slot (pgp->paths, pp, j){
186                         if (pp->state == PATH_UNCHECKED || 
187                             pp->state == PATH_WILD)
188                                 continue;
189                         if ((pp->dmstate == PSTATE_FAILED ||
190                              pp->dmstate == PSTATE_UNDEF) &&
191                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
192                                 dm_reinstate_path(mpp->alias, pp->dev_t);
193                         else if ((pp->dmstate == PSTATE_ACTIVE ||
194                                   pp->dmstate == PSTATE_UNDEF) &&
195                                  (pp->state == PATH_DOWN ||
196                                   pp->state == PATH_SHAKY))
197                                 dm_fail_path(mpp->alias, pp->dev_t);
198                 }
199         }
200 }
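
/*
 * Summary of the resync above (sketch):
 *
 *      checker state        dm state            action
 *      UP or GHOST          FAILED or UNDEF     dm_reinstate_path()
 *      DOWN or SHAKY        ACTIVE or UNDEF     dm_fail_path()
 *
 * UNCHECKED and WILD paths are left untouched.
 */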
201
202 static void
203 sync_maps_state(vector mpvec)
204 {
205         unsigned int i;
206         struct multipath *mpp;
207
208         vector_foreach_slot (mpvec, mpp, i)
209                 sync_map_state(mpp);
210 }
211
212 static int
213 flush_map(struct multipath * mpp, struct vectors * vecs)
214 {
215         /*
216          * clear references to this map before flushing so we can ignore
217          * the spurious uevent we may generate with the dm_flush_map call below
218          */
219         if (dm_flush_map(mpp->alias)) {
220                 /*
221                  * May not really be an error -- if the map was already flushed
222                  * from the device mapper by dmsetup(8) for instance.
223                  */
224                 condlog(0, "%s: can't flush", mpp->alias);
225                 return 1;
226         }
227         else {
228                 dm_lib_release();
229                 condlog(2, "%s: devmap removed", mpp->alias);
230         }
231
232         orphan_paths(vecs->pathvec, mpp);
233         remove_map_and_stop_waiter(mpp, vecs, 1);
234
235         return 0;
236 }
237
238 static int
239 uev_add_map (struct uevent * uev, struct vectors * vecs)
240 {
241         char *alias;
242         int major = -1, minor = -1, rc;
243
244         condlog(3, "%s: add map (uevent)", uev->kernel);
245         alias = uevent_get_dm_name(uev);
246         if (!alias) {
247                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
248                 major = uevent_get_major(uev);
249                 minor = uevent_get_minor(uev);
250                 alias = dm_mapname(major, minor);
251                 if (!alias) {
252                         condlog(2, "%s: mapname not found for %d:%d",
253                                 uev->kernel, major, minor);
254                         return 1;
255                 }
256         }
257         rc = ev_add_map(uev->kernel, alias, vecs);
258         FREE(alias);
259         return rc;
260 }
261
262 int
263 ev_add_map (char * dev, char * alias, struct vectors * vecs)
264 {
265         char * refwwid;
266         struct multipath * mpp;
267         int map_present;
268         int r = 1;
269
270         map_present = dm_map_present(alias);
271
272         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
273                 condlog(4, "%s: not a multipath map", alias);
274                 return 0;
275         }
276
277         mpp = find_mp_by_alias(vecs->mpvec, alias);
278
279         if (mpp) {
280                 /*
281                  * Not really an error -- we generate our own uevent
282                  * if we create a multipath mapped device as a result
283                  * of uev_add_path
284                  */
285                 if (conf->reassign_maps) {
286                         condlog(3, "%s: Reassign existing device-mapper devices",
287                                 alias);
288                         dm_reassign(alias);
289                 }
290                 return 0;
291         }
292         condlog(2, "%s: adding map", alias);
293
294         /*
295          * now we can register the map
296          */
297         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
298                 sync_map_state(mpp);
299                 condlog(2, "%s: devmap %s registered", alias, dev);
300                 return 0;
301         }
302         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
303
304         if (refwwid) {
305                 r = coalesce_paths(vecs, NULL, refwwid, 0);
306                 dm_lib_release();
307         }
308
309         if (!r)
310                 condlog(2, "%s: devmap %s added", alias, dev);
311         else if (r == 2)
312                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
313         else
314                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
315
316         FREE(refwwid);
317         return r;
318 }
319
320 static int
321 uev_remove_map (struct uevent * uev, struct vectors * vecs)
322 {
323         char *alias;
324         int minor;
325         struct multipath *mpp;
326
327         condlog(2, "%s: remove map (uevent)", uev->kernel);
328         alias = uevent_get_dm_name(uev);
329         if (!alias) {
330                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
331                 return 0;
332         }
333         minor = uevent_get_minor(uev);
334         mpp = find_mp_by_minor(vecs->mpvec, minor);
335
336         if (!mpp) {
337                 condlog(2, "%s: devmap not registered, can't remove",
338                         uev->kernel);
339                 goto out;
340         }
341         if (strcmp(mpp->alias, alias)) {
342                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
343                         mpp->alias, mpp->dmi->minor, minor);
344                 goto out;
345         }
346
347         orphan_paths(vecs->pathvec, mpp);
348         remove_map_and_stop_waiter(mpp, vecs, 1);
349 out:
350         FREE(alias);
351         return 0;
352 }
353
354 int
355 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
356 {
357         struct multipath * mpp;
358
359         mpp = find_mp_by_minor(vecs->mpvec, minor);
360
361         if (!mpp) {
362                 condlog(2, "%s: devmap not registered, can't remove",
363                         devname);
364                 return 0;
365         }
366         if (strcmp(mpp->alias, alias)) {
367                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
368                         mpp->alias, mpp->dmi->minor, minor);
369                 return 0;
370         }
371         return flush_map(mpp, vecs);
372 }
373
374 static int
375 uev_add_path (struct uevent *uev, struct vectors * vecs)
376 {
377         struct path *pp;
378         int ret;
379
380         condlog(2, "%s: add path (uevent)", uev->kernel);
381         if (strstr(uev->kernel, "..") != NULL) {
382                 /*
383                  * Don't allow relative device names in the pathvec
384                  */
385                 condlog(0, "%s: path name is invalid", uev->kernel);
386                 return 1;
387         }
388
389         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
390         if (pp) {
391                 condlog(0, "%s: spurious uevent, path already in pathvec",
392                         uev->kernel);
393                 if (pp->mpp)
394                         return 0;
395         } else {
396                 /*
397                  * get path vital state
398                  */
399                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
400                                      uev->udev, DI_ALL, &pp);
401                 if (!pp) {
402                         if (ret == 2)
403                                 return 0;
404                         condlog(0, "%s: failed to store path info",
405                                 uev->kernel);
406                         return 1;
407                 }
408                 pp->checkint = conf->checkint;
409         }
410
411         return ev_add_path(pp, vecs);
412 }
413
414 /*
415  * returns:
416  * 0: added
417  * 1: error
418  */
419 int
420 ev_add_path (struct path * pp, struct vectors * vecs)
421 {
422         struct multipath * mpp;
423         char empty_buff[WWID_SIZE] = {0};
424         char params[PARAMS_SIZE] = {0};
425         int retries = 3;
426         int start_waiter = 0;
427
428         /*
429          * need path UID to go any further
430          */
431         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
432                 condlog(0, "%s: failed to get path uid", pp->dev);
433                 goto fail; /* leave path added to pathvec */
434         }
435         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
436 rescan:
437         if (mpp) {
438                 if ((!pp->size) || (mpp->size != pp->size)) {
439                         if (!pp->size)
440                                 condlog(0, "%s: failed to add new path %s, "
441                                         "device size is 0",
442                                         mpp->alias, pp->dev);
443                         else
444                                 condlog(0, "%s: failed to add new path %s, "
445                                         "device size mismatch",
446                                         mpp->alias, pp->dev);
447                         int i = find_slot(vecs->pathvec, (void *)pp);
448                         if (i != -1)
449                                 vector_del_slot(vecs->pathvec, i);
450                         free_path(pp);
451                         return 1;
452                 }
453
454                 condlog(4,"%s: adopting all paths for path %s",
455                         mpp->alias, pp->dev);
456                 if (adopt_paths(vecs->pathvec, mpp, 1))
457                         goto fail; /* leave path added to pathvec */
458
459                 verify_paths(mpp, vecs, NULL);
460                 mpp->flush_on_last_del = FLUSH_UNDEF;
461                 mpp->action = ACT_RELOAD;
462         }
463         else {
464                 if (!pp->size) {
465                         condlog(0, "%s: failed to create new map,"
466                                 " device size is 0 ", pp->dev);
467                         int i = find_slot(vecs->pathvec, (void *)pp);
468                         if (i != -1)
469                                 vector_del_slot(vecs->pathvec, i);
470                         free_path(pp);
471                         return 1;
472                 }
473
474                 condlog(4,"%s: creating new map", pp->dev);
475                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
476                         mpp->action = ACT_CREATE;
477                         /*
478                          * We don't depend on ACT_CREATE, as domap will
479                          * set it to ACT_NOTHING when complete.
480                          */
481                         start_waiter = 1;
482                 }
483                 else
484                         goto fail; /* leave path added to pathvec */
485         }
486
487         /* persistent reservation check */
488         mpath_pr_event_handle(pp);
489
490         /*
491          * push the map to the device-mapper
492          */
493         if (setup_map(mpp, params, PARAMS_SIZE)) {
494                 condlog(0, "%s: failed to setup map for addition of new "
495                         "path %s", mpp->alias, pp->dev);
496                 goto fail_map;
497         }
498         /*
499          * reload the map for the multipath mapped device
500          */
501         if (domap(mpp, params) <= 0) {
502                 condlog(0, "%s: failed in domap for addition of new "
503                         "path %s", mpp->alias, pp->dev);
504                 /*
505                  * deal with asynchronous uevents :((
506                  */
507                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
508                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
509                         sleep(1);
510                         update_mpp_paths(mpp, vecs->pathvec);
511                         goto rescan;
512                 }
513                 else if (mpp->action == ACT_RELOAD)
514                         condlog(0, "%s: giving up reload", mpp->alias);
515                 else
516                         goto fail_map;
517         }
518         dm_lib_release();
519
520         /*
521          * update our state from kernel regardless of create or reload
522          */
523         if (setup_multipath(vecs, mpp))
524                 goto fail; /* if setup_multipath fails, it removes the map */
525
526         sync_map_state(mpp);
527
528         if ((mpp->action == ACT_CREATE ||
529              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
530             start_waiter_thread(mpp, vecs))
531                         goto fail_map;
532
533         if (retries >= 0) {
534                 condlog(2, "%s path added to devmap %s", pp->dev, mpp->alias);
535                 return 0;
536         }
537         else
538                 return 1;
539
540 fail_map:
541         remove_map(mpp, vecs, 1);
542 fail:
543         orphan_path(pp);
544         return 1;
545 }
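
/*
 * Rough flow of ev_add_path() above: the path either joins an existing
 * map (ACT_RELOAD) or triggers creation of a new one (ACT_CREATE); then
 * setup_map() builds the table and domap() pushes it to device-mapper.
 * A failed reload is retried from the "rescan" label up to 3 times,
 * sleeping one second between attempts.
 */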
546
547 static int
548 uev_remove_path (struct uevent *uev, struct vectors * vecs)
549 {
550         struct path *pp;
551
552         condlog(2, "%s: remove path (uevent)", uev->kernel);
553         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
554
555         if (!pp) {
556                 /* Not an error; path might have been purged earlier */
557                 condlog(0, "%s: path already removed", uev->kernel);
558                 return 0;
559         }
560
561         return ev_remove_path(pp, vecs);
562 }
563
564 int
565 ev_remove_path (struct path *pp, struct vectors * vecs)
566 {
567         struct multipath * mpp;
568         int i, retval = 0;
569         char params[PARAMS_SIZE] = {0};
570
571         /*
572          * avoid referring to the map of an orphaned path
573          */
574         if ((mpp = pp->mpp)) {
575                 /*
576                  * transform the mp->pg vector of vectors of paths
577                  * into a mp->params string to feed the device-mapper
578                  */
579                 if (update_mpp_paths(mpp, vecs->pathvec)) {
580                         condlog(0, "%s: failed to update paths",
581                                 mpp->alias);
582                         goto fail;
583                 }
584                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
585                         vector_del_slot(mpp->paths, i);
586
587                 /*
588                  * remove the map IFF removing the last path
589                  */
590                 if (VECTOR_SIZE(mpp->paths) == 0) {
591                         char alias[WWID_SIZE];
592
593                         /*
594                          * flush_map will fail if the device is open
595                          */
596                         strncpy(alias, mpp->alias, WWID_SIZE);
597                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
598                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
599                                 mpp->retry_tick = 0;
600                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
601                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
602                                 dm_queue_if_no_path(mpp->alias, 0);
603                         }
604                         if (!flush_map(mpp, vecs)) {
605                                 condlog(2, "%s: removed map after"
606                                         " removing all paths",
607                                         alias);
608                                 retval = 0;
609                                 goto out;
610                         }
611                         /*
612                          * Not an error, continue
613                          */
614                 }
615
616                 if (setup_map(mpp, params, PARAMS_SIZE)) {
617                         condlog(0, "%s: failed to setup map for"
618                                 " removal of path %s", mpp->alias, pp->dev);
619                         goto fail;
620                 }
621                 /*
622                  * reload the map
623                  */
624                 mpp->action = ACT_RELOAD;
625                 if (domap(mpp, params) <= 0) {
626                         condlog(0, "%s: failed in domap for "
627                                 "removal of path %s",
628                                 mpp->alias, pp->dev);
629                         retval = 1;
630                 } else {
631                         /*
632                          * update our state from kernel
633                          */
634                         if (setup_multipath(vecs, mpp)) {
635                                 goto fail;
636                         }
637                         sync_map_state(mpp);
638
639                         condlog(2, "%s: path removed from map %s",
640                                 pp->dev, mpp->alias);
641                 }
642         }
643
644 out:
645         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
646                 vector_del_slot(vecs->pathvec, i);
647
648         free_path(pp);
649
650         return retval;
651
652 fail:
653         remove_map_and_stop_waiter(mpp, vecs, 1);
654         return 1;
655 }
656
657 static int
658 uev_update_path (struct uevent *uev, struct vectors * vecs)
659 {
660         int ro, retval = 0;
661
662         ro = uevent_get_disk_ro(uev);
663
664         if (ro >= 0) {
665                 struct path * pp;
666
667                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
668                         uev->kernel, ro);
669                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
670                 if (!pp) {
671                         condlog(0, "%s: spurious uevent, path not found",
672                                 uev->kernel);
673                         return 1;
674                 }
675                 if (pp->mpp) {
676                         retval = reload_map(vecs, pp->mpp, 0);
677
678                         condlog(2, "%s: map %s reloaded (retval %d)",
679                                 uev->kernel, pp->mpp->alias, retval);
680                 }
681
682         }
683
684         return retval;
685 }
686
687 static int
688 map_discovery (struct vectors * vecs)
689 {
690         struct multipath * mpp;
691         unsigned int i;
692
693         if (dm_get_maps(vecs->mpvec))
694                 return 1;
695
696         vector_foreach_slot (vecs->mpvec, mpp, i)
697                 if (setup_multipath(vecs, mpp))
698                         return 1;
699
700         return 0;
701 }
702
703 int
704 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
705 {
706         struct vectors * vecs;
707         int r;
708
709         *reply = NULL;
710         *len = 0;
711         vecs = (struct vectors *)trigger_data;
712
713         pthread_cleanup_push(cleanup_lock, &vecs->lock);
714         lock(vecs->lock);
715         pthread_testcancel();
716
717         r = parse_cmd(str, reply, len, vecs);
718
719         if (r > 0) {
720                 *reply = STRDUP("fail\n");
721                 *len = strlen(*reply) + 1;
722                 r = 1;
723         }
724         else if (!r && *len == 0) {
725                 *reply = STRDUP("ok\n");
726                 *len = strlen(*reply) + 1;
727                 r = 0;
728         }
729         /* else if (r < 0) leave *reply alone */
730
731         lock_cleanup_pop(vecs->lock);
732         return r;
733 }
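
/*
 * Reply contract above (sketch): a parse_cmd() failure (r > 0) becomes
 * a "fail\n" reply, success with no payload becomes "ok\n", and r < 0
 * keeps whatever reply parse_cmd() already produced.
 */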
734
735 static int
736 uev_discard(char * devpath)
737 {
738         char *tmp;
739         char a[11], b[11];
740
741         /*
742          * keep only block devices, discard partitions
743          */
744         tmp = strstr(devpath, "/block/");
745         if (tmp == NULL){
746                 condlog(4, "no /block/ in '%s'", devpath);
747                 return 1;
748         }
749         if (sscanf(tmp, "/block/%10s", a) != 1 ||
750             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
751                 condlog(4, "discard event on %s", devpath);
752                 return 1;
753         }
754         return 0;
755 }
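
/*
 * For illustration: a devpath such as ".../block/sdb" passes the checks
 * above, while ".../block/sdb/sdb1" matches the second sscanf() pattern
 * and is discarded as a partition event.
 */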
756
757 int
758 uev_trigger (struct uevent * uev, void * trigger_data)
759 {
760         int r = 0;
761         struct vectors * vecs;
762
763         vecs = (struct vectors *)trigger_data;
764
765         if (uev_discard(uev->devpath))
766                 return 0;
767
768         pthread_cleanup_push(cleanup_lock, &vecs->lock);
769         lock(vecs->lock);
770         pthread_testcancel();
771
772         /*
773          * device map event
774          * Add events are ignored here as the tables
775          * are not fully initialised then.
776          */
777         if (!strncmp(uev->kernel, "dm-", 3)) {
778                 if (!strncmp(uev->action, "change", 6)) {
779                         r = uev_add_map(uev, vecs);
780                         goto out;
781                 }
782                 if (!strncmp(uev->action, "remove", 6)) {
783                         r = uev_remove_map(uev, vecs);
784                         goto out;
785                 }
786                 goto out;
787         }
788
789         /*
790          * path add/remove event
791          */
792         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
793                            uev->kernel) > 0)
794                 goto out;
795
796         if (!strncmp(uev->action, "add", 3)) {
797                 r = uev_add_path(uev, vecs);
798                 goto out;
799         }
800         if (!strncmp(uev->action, "remove", 6)) {
801                 r = uev_remove_path(uev, vecs);
802                 goto out;
803         }
804         if (!strncmp(uev->action, "change", 6)) {
805                 r = uev_update_path(uev, vecs);
806                 goto out;
807         }
808
809 out:
810         lock_cleanup_pop(vecs->lock);
811         return r;
812 }
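
/*
 * Dispatch summary for uev_trigger() (sketch):
 *
 *      dm-* change  -> uev_add_map       dm-* remove -> uev_remove_map
 *      path add     -> uev_add_path      path remove -> uev_remove_path
 *      path change  -> uev_update_path
 *
 * dm-* "add" events are deliberately ignored (tables not initialised
 * yet), and devnode-blacklisted paths never reach the path handlers.
 */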
813
814 static void *
815 ueventloop (void * ap)
816 {
817         block_signal(SIGUSR1, NULL);
818         block_signal(SIGHUP, NULL);
819
820         if (uevent_listen())
821                 condlog(0, "error starting uevent listener");
822
823         return NULL;
824 }
825
826 static void *
827 uevqloop (void * ap)
828 {
829         block_signal(SIGUSR1, NULL);
830         block_signal(SIGHUP, NULL);
831
832         if (uevent_dispatch(&uev_trigger, ap))
833                 condlog(0, "error starting uevent dispatcher");
834
835         return NULL;
836 }
837 static void *
838 uxlsnrloop (void * ap)
839 {
840         block_signal(SIGUSR1, NULL);
841         block_signal(SIGHUP, NULL);
842
843         if (cli_init())
844                 return NULL;
845
846         set_handler_callback(LIST+PATHS, cli_list_paths);
847         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
848         set_handler_callback(LIST+MAPS, cli_list_maps);
849         set_handler_callback(LIST+STATUS, cli_list_status);
850         set_handler_callback(LIST+DAEMON, cli_list_daemon);
851         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
852         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
853         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
854         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
855         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
856         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
857         set_handler_callback(LIST+CONFIG, cli_list_config);
858         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
859         set_handler_callback(LIST+DEVICES, cli_list_devices);
860         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
861         set_handler_callback(ADD+PATH, cli_add_path);
862         set_handler_callback(DEL+PATH, cli_del_path);
863         set_handler_callback(ADD+MAP, cli_add_map);
864         set_handler_callback(DEL+MAP, cli_del_map);
865         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
866         set_handler_callback(RECONFIGURE, cli_reconfigure);
867         set_handler_callback(SUSPEND+MAP, cli_suspend);
868         set_handler_callback(RESUME+MAP, cli_resume);
869         set_handler_callback(RESIZE+MAP, cli_resize);
870         set_handler_callback(RELOAD+MAP, cli_reload);
871         set_handler_callback(RESET+MAP, cli_reassign);
872         set_handler_callback(REINSTATE+PATH, cli_reinstate);
873         set_handler_callback(FAIL+PATH, cli_fail);
874         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
875         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
876         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
877         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
878         set_handler_callback(QUIT, cli_quit);
879         set_handler_callback(SHUTDOWN, cli_shutdown);
880         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
881         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
882         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
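
        /*
         * With the handlers above registered, CLI input such as
         * "list paths" is routed to cli_list_paths() and "fail path <dev>"
         * to cli_fail() (examples for illustration).
         */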
883
884         umask(077);
885         uxsock_listen(&uxsock_trigger, ap);
886
887         return NULL;
888 }
889
890 int
891 exit_daemon (int status)
892 {
893         if (status != 0)
894                 fprintf(stderr, "bad exit status. see daemon.log\n");
895
896         if (running_state != DAEMON_SHUTDOWN) {
897                 pthread_mutex_lock(&exit_mutex);
898                 pthread_cond_signal(&exit_cond);
899                 pthread_mutex_unlock(&exit_mutex);
900         }
901         return status;
902 }
903
904 const char *
905 daemon_status(void)
906 {
907         switch (running_state) {
908         case DAEMON_INIT:
909                 return "init";
910         case DAEMON_START:
911                 return "startup";
912         case DAEMON_CONFIGURE:
913                 return "configure";
914         case DAEMON_RUNNING:
915                 return "running";
916         case DAEMON_SHUTDOWN:
917                 return "shutdown";
918         }
919         return NULL;
920 }
921
922 static void
923 fail_path (struct path * pp, int del_active)
924 {
925         if (!pp->mpp)
926                 return;
927
928         condlog(2, "checker failed path %s in map %s",
929                  pp->dev_t, pp->mpp->alias);
930
931         dm_fail_path(pp->mpp->alias, pp->dev_t);
932         if (del_active)
933                 update_queue_mode_del_path(pp->mpp);
934 }
935
936 /*
937  * caller must have locked the path list before calling this function
938  */
939 static void
940 reinstate_path (struct path * pp, int add_active)
941 {
942         if (!pp->mpp)
943                 return;
944
945         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
946                 condlog(0, "%s: reinstate failed", pp->dev_t);
947         else {
948                 condlog(2, "%s: reinstated", pp->dev_t);
949                 if (add_active)
950                         update_queue_mode_add_path(pp->mpp);
951         }
952 }
953
954 static void
955 enable_group(struct path * pp)
956 {
957         struct pathgroup * pgp;
958
959         /*
960          * if the path is added through uev_add_path, pgindex can be unset.
961          * the next update_multipath_strings() will set it, upon map reload.
962          *
963          * we can safely return here, because upon map reload, all
964          * path groups will be enabled.
965          */
966         if (!pp->mpp->pg || !pp->pgindex)
967                 return;
968
969         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
970
971         if (pgp->status == PGSTATE_DISABLED) {
972                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
973                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
974         }
975 }
976
977 static void
978 mpvec_garbage_collector (struct vectors * vecs)
979 {
980         struct multipath * mpp;
981         unsigned int i;
982
983         if (!vecs->mpvec)
984                 return;
985
986         vector_foreach_slot (vecs->mpvec, mpp, i) {
987                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
988                         condlog(2, "%s: remove dead map", mpp->alias);
989                         remove_map_and_stop_waiter(mpp, vecs, 1);
990                         i--;
991                 }
992         }
993 }
994
995 /* This is called after a path has started working again. If the multipath
996  * device for this path uses the followover failback type, and this is the
997  * best pathgroup, and this is the first path in the pathgroup to come back
998  * up, then switch to this pathgroup */
999 static int
1000 followover_should_failback(struct path * pp)
1001 {
1002         struct pathgroup * pgp;
1003         struct path *pp1;
1004         int i;
1005
1006         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1007             !pp->mpp->pg || !pp->pgindex ||
1008             pp->pgindex != pp->mpp->bestpg)
1009                 return 0;
1010
1011         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1012         vector_foreach_slot(pgp->paths, pp1, i) {
1013                 if (pp1 == pp)
1014                         continue;
1015                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1016                         return 0;
1017         }
1018         return 1;
1019 }
1020
1021 static void
1022 defered_failback_tick (vector mpvec)
1023 {
1024         struct multipath * mpp;
1025         unsigned int i;
1026
1027         vector_foreach_slot (mpvec, mpp, i) {
1028                 /*
1029                  * deferred failback getting closer
1030                  */
1031                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1032                         mpp->failback_tick--;
1033
1034                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1035                                 switch_pathgroup(mpp);
1036                 }
1037         }
1038 }
1039
1040 static void
1041 retry_count_tick(vector mpvec)
1042 {
1043         struct multipath *mpp;
1044         unsigned int i;
1045
1046         vector_foreach_slot (mpvec, mpp, i) {
1047                 if (mpp->retry_tick) {
1048                         mpp->stat_total_queueing_time++;
1049                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1050                         if (--mpp->retry_tick == 0) {
1051                                 dm_queue_if_no_path(mpp->alias, 0);
1052                                 condlog(2, "%s: Disable queueing", mpp->alias);
1053                         }
1054                 }
1055         }
1056 }
1057
1058 int update_prio(struct path *pp, int refresh_all)
1059 {
1060         int oldpriority;
1061         struct path *pp1;
1062         struct pathgroup * pgp;
1063         int i, j, changed = 0;
1064
1065         if (refresh_all) {
1066                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1067                         vector_foreach_slot (pgp->paths, pp1, j) {
1068                                 oldpriority = pp1->priority;
1069                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1070                                 if (pp1->priority != oldpriority)
1071                                         changed = 1;
1072                         }
1073                 }
1074                 return changed;
1075         }
1076         oldpriority = pp->priority;
1077         pathinfo(pp, conf->hwtable, DI_PRIO);
1078
1079         if (pp->priority == oldpriority)
1080                 return 0;
1081         return 1;
1082 }
1083
1084 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1085 {
1086         if (reload_map(vecs, mpp, refresh))
1087                 return 1;
1088
1089         dm_lib_release();
1090         if (setup_multipath(vecs, mpp) != 0)
1091                 return 1;
1092         sync_map_state(mpp);
1093
1094         return 0;
1095 }
1096
1097 void
1098 check_path (struct vectors * vecs, struct path * pp)
1099 {
1100         int newstate;
1101         int new_path_up = 0;
1102         int chkr_new_path_up = 0;
1103         int oldchkrstate = pp->chkrstate;
1104
1105         if (!pp->mpp)
1106                 return;
1107
1108         if (pp->tick && --pp->tick)
1109                 return; /* don't check this path yet */
1110
1111         /*
1112          * schedule the next check as soon as possible,
1113          * in case we exit abnormally from here
1114          */
1115         pp->tick = conf->checkint;
1116
1117         newstate = path_offline(pp);
1118         if (newstate == PATH_UP)
1119                 newstate = get_state(pp, 1);
1120
1121         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1122                 condlog(2, "%s: unusable path", pp->dev);
1123                 pathinfo(pp, conf->hwtable, 0);
1124                 return;
1125         }
1126         /*
1127          * Async IO in flight. Keep the previous path state
1128          * and reschedule as soon as possible
1129          */
1130         if (newstate == PATH_PENDING) {
1131                 pp->tick = 1;
1132                 return;
1133         }
1134         /*
1135          * Synchronize with kernel state
1136          */
1137         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1138                 condlog(1, "%s: Could not synchronize with kernel state",
1139                         pp->dev);
1140                 pp->dmstate = PSTATE_UNDEF;
1141         }
1142         pp->chkrstate = newstate;
1143         if (newstate != pp->state) {
1144                 int oldstate = pp->state;
1145                 pp->state = newstate;
1146                 LOG_MSG(1, checker_message(&pp->checker));
1147
1148                 /*
1149                  * upon state change, reset the checkint
1150                  * to the shortest delay
1151                  */
1152                 pp->checkint = conf->checkint;
1153
1154                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1155                         /*
1156                          * proactively fail path in the DM
1157                          */
1158                         if (oldstate == PATH_UP ||
1159                             oldstate == PATH_GHOST)
1160                                 fail_path(pp, 1);
1161                         else
1162                                 fail_path(pp, 0);
1163
1164                         /*
1165                          * cancel scheduled failback
1166                          */
1167                         pp->mpp->failback_tick = 0;
1168
1169                         pp->mpp->stat_path_failures++;
1170                         return;
1171                 }
1172
1173                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1174                         if (pp->mpp && pp->mpp->prflag) {
1175                                 /*
1176                                  * Check Persistent Reservation.
1177                                  */
1178                                 condlog(2, "%s: checking persistent reservation "
1179                                         "registration", pp->dev);
1180                                 mpath_pr_event_handle(pp);
1181                         }
1182                 }
1183
1184                 /*
1185                  * reinstate this path
1186                  */
1187                 if (oldstate != PATH_UP &&
1188                     oldstate != PATH_GHOST)
1189                         reinstate_path(pp, 1);
1190                 else
1191                         reinstate_path(pp, 0);
1192
1193                 new_path_up = 1;
1194
1195                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1196                         chkr_new_path_up = 1;
1197
1198                 /*
1199                  * if at least one path is up in a group, and
1200                  * the group is disabled, re-enable it
1201                  */
1202                 if (newstate == PATH_UP)
1203                         enable_group(pp);
1204         }
1205         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1206                 if (pp->dmstate == PSTATE_FAILED ||
1207                     pp->dmstate == PSTATE_UNDEF) {
1208                         /* Clear IO errors */
1209                         reinstate_path(pp, 0);
1210                 } else {
1211                         LOG_MSG(4, checker_message(&pp->checker));
1212                         /*
1213                          * double the next check delay.
1214                          * max at conf->max_checkint
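                              * (e.g. with checkint 5 and max_checkint 20:
                              * 5s, 10s, 20s, 20s, ...)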
1215                          */
1216                         if (pp->checkint < (conf->max_checkint / 2))
1217                                 pp->checkint = 2 * pp->checkint;
1218                         else
1219                                 pp->checkint = conf->max_checkint;
1220
1221                         pp->tick = pp->checkint;
1222                         condlog(4, "%s: delay next check %is",
1223                                 pp->dev_t, pp->tick);
1224                 }
1225         }
1226         else if (newstate == PATH_DOWN) {
1227                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1228                         LOG_MSG(3, checker_message(&pp->checker));
1229                 else
1230                         LOG_MSG(2, checker_message(&pp->checker));
1231         }
1232
1233         pp->state = newstate;
1234
1235         /*
1236          * path prio refreshing
1237          */
1238         condlog(4, "path prio refresh");
1239
1240         if (update_prio(pp, new_path_up) &&
1241             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1242              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1243                 update_path_groups(pp->mpp, vecs, !new_path_up);
1244         else if (need_switch_pathgroup(pp->mpp, 0)) {
1245                 if (pp->mpp->pgfailback > 0 &&
1246                     (new_path_up || pp->mpp->failback_tick <= 0))
1247                         pp->mpp->failback_tick =
1248                                 pp->mpp->pgfailback + 1;
1249                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1250                          (chkr_new_path_up && followover_should_failback(pp)))
1251                         switch_pathgroup(pp->mpp);
1252         }
1253 }
1254
1255 static void *
1256 checkerloop (void *ap)
1257 {
1258         struct vectors *vecs;
1259         struct path *pp;
1260         int count = 0;
1261         unsigned int i;
1262         sigset_t old;
1263
1264         mlockall(MCL_CURRENT | MCL_FUTURE);
1265         vecs = (struct vectors *)ap;
1266         condlog(2, "path checkers start up");
1267
1268         /*
1269          * init the path check interval
1270          */
1271         vector_foreach_slot (vecs->pathvec, pp, i) {
1272                 pp->checkint = conf->checkint;
1273         }
1274
1275         while (1) {
1276                 block_signal(SIGHUP, &old);
1277                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1278                 lock(vecs->lock);
1279                 pthread_testcancel();
1280                 condlog(4, "tick");
1281
1282                 if (vecs->pathvec) {
1283                         vector_foreach_slot (vecs->pathvec, pp, i) {
1284                                 check_path(vecs, pp);
1285                         }
1286                 }
1287                 if (vecs->mpvec) {
1288                         defered_failback_tick(vecs->mpvec);
1289                         retry_count_tick(vecs->mpvec);
1290                 }
1291                 if (count)
1292                         count--;
1293                 else {
1294                         condlog(4, "map garbage collection");
1295                         mpvec_garbage_collector(vecs);
1296                         count = MAPGCINT;
1297                 }
1298
1299                 lock_cleanup_pop(vecs->lock);
1300                 pthread_sigmask(SIG_SETMASK, &old, NULL);
1301                 sleep(1);
1302         }
1303         return NULL;
1304 }
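
/*
 * Checker cadence (sketch): the loop above wakes once per second; each
 * path is only re-checked when its own pp->tick countdown reaches zero,
 * and dead maps are garbage-collected every MAPGCINT iterations.
 */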
1305
1306 int
1307 configure (struct vectors * vecs, int start_waiters)
1308 {
1309         struct multipath * mpp;
1310         struct path * pp;
1311         vector mpvec;
1312         int i;
1313
1314         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1315                 return 1;
1316
1317         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1318                 return 1;
1319
1320         if (!(mpvec = vector_alloc()))
1321                 return 1;
1322
1323         /*
1324          * probe for current path (from sysfs) and map (from dm) sets
1325          */
1326         path_discovery(vecs->pathvec, conf, DI_ALL);
1327
1328         vector_foreach_slot (vecs->pathvec, pp, i){
1329                 if (filter_path(conf, pp) > 0){
1330                         vector_del_slot(vecs->pathvec, i);
1331                         free_path(pp);
1332                         i--;
1333                 }
1334                 else
1335                         pp->checkint = conf->checkint;
1336         }
1337         if (map_discovery(vecs))
1338                 return 1;
1339
1340         /*
1341          * create new set of maps & push changed ones into dm
1342          */
1343         if (coalesce_paths(vecs, mpvec, NULL, 1))
1344                 return 1;
1345
1346         /*
1347          * may need to remove some maps which are no longer relevant
1348          * e.g., due to blacklist changes in conf file
1349          */
1350         if (coalesce_maps(vecs, mpvec))
1351                 return 1;
1352
1353         dm_lib_release();
1354
1355         sync_maps_state(mpvec);
1356         vector_foreach_slot(mpvec, mpp, i){
1357                 update_map_pr(mpp);
1358         }
1359
1360         /*
1361          * purge dm of old maps
1362          */
1363         remove_maps(vecs);
1364
1365         /*
1366          * save new set of maps formed by considering current path state
1367          */
1368         vector_free(vecs->mpvec);
1369         vecs->mpvec = mpvec;
1370
1371         /*
1372          * start dm event waiter threads for these new maps
1373          */
1374         vector_foreach_slot(vecs->mpvec, mpp, i) {
1375                 if (setup_multipath(vecs, mpp))
1376                         return 1;
1377                 if (start_waiters)
1378                         if (start_waiter_thread(mpp, vecs))
1379                                 return 1;
1380         }
1381         return 0;
1382 }
1383
1384 int
1385 reconfigure (struct vectors * vecs)
1386 {
1387         struct config * old = conf;
1388         int retval = 1;
1389
1390         /*
1391          * free old map and path vectors ... they use old conf state
1392          */
1393         if (VECTOR_SIZE(vecs->mpvec))
1394                 remove_maps_and_stop_waiters(vecs);
1395
1396         if (VECTOR_SIZE(vecs->pathvec))
1397                 free_pathvec(vecs->pathvec, FREE_PATHS);
1398
1399         vecs->pathvec = NULL;
1400         conf = NULL;
1401
1402         if (!load_config(DEFAULT_CONFIGFILE)) {
1403                 conf->verbosity = old->verbosity;
1404                 conf->daemon = 1;
1405                 configure(vecs, 1);
1406                 free_config(old);
1407                 retval = 0;
1408         }
1409
1410         return retval;
1411 }
1412
1413 static struct vectors *
1414 init_vecs (void)
1415 {
1416         struct vectors * vecs;
1417
1418         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1419
1420         if (!vecs)
1421                 return NULL;
1422
1423         vecs->lock.mutex =
1424                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1425
1426         if (!vecs->lock.mutex)
1427                 goto out;
1428
1429         pthread_mutex_init(vecs->lock.mutex, NULL);
1430         vecs->lock.depth = 0;
1431
1432         return vecs;
1433
1434 out:
1435         FREE(vecs);
1436         condlog(0, "failed to init paths");
1437         return NULL;
1438 }
1439
1440 static void *
1441 signal_set(int signo, void (*func) (int))
1442 {
1443         int r;
1444         struct sigaction sig;
1445         struct sigaction osig;
1446
1447         sig.sa_handler = func;
1448         sigemptyset(&sig.sa_mask);
1449         sig.sa_flags = 0;
1450
1451         r = sigaction(signo, &sig, &osig);
1452
1453         if (r < 0)
1454                 return (SIG_ERR);
1455         else
1456                 return (osig.sa_handler);
1457 }
1458
1459 static void
1460 sighup (int sig)
1461 {
1462         condlog(2, "reconfigure (SIGHUP)");
1463
1464         if (running_state != DAEMON_RUNNING)
1465                 return;
1466
1467         reconfigure(gvecs);
1468
1469 #ifdef _DEBUG_
1470         dbg_free_final(NULL);
1471 #endif
1472 }
1473
1474 static void
1475 sigend (int sig)
1476 {
1477         exit_daemon(0);
1478 }
1479
1480 static void
1481 sigusr1 (int sig)
1482 {
1483         condlog(3, "SIGUSR1 received");
1484 }
1485
1486 static void
1487 signal_init(void)
1488 {
1489         signal_set(SIGHUP, sighup);
1490         signal_set(SIGUSR1, sigusr1);
1491         signal_set(SIGINT, sigend);
1492         signal_set(SIGTERM, sigend);
1493         signal(SIGPIPE, SIG_IGN);
1494 }
1495
1496 static void
1497 setscheduler (void)
1498 {
1499         int res;
1500         static struct sched_param sched_param = {
1501                 .sched_priority = 99
1502         };
1503
1504         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1505
1506         if (res == -1)
1507                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1508         return;
1509 }
1510
1511 static void
1512 set_oom_adj (void)
1513 {
1514 #ifdef OOM_SCORE_ADJ_MIN
1515         int retry = 1;
1516         char *file = "/proc/self/oom_score_adj";
1517         int score = OOM_SCORE_ADJ_MIN;
1518 #else
1519         int retry = 0;
1520         char *file = "/proc/self/oom_adj";
1521         int score = OOM_ADJUST_MIN;
1522 #endif
1523         FILE *fp;
1524         struct stat st;
1525
1526         do {
1527                 if (stat(file, &st) == 0){
1528                         fp = fopen(file, "w");
1529                         if (!fp) {
1530                                 condlog(0, "couldn't fopen %s : %s", file,
1531                                         strerror(errno));
1532                                 return;
1533                         }
1534                         fprintf(fp, "%i", score);
1535                         fclose(fp);
1536                         return;
1537                 }
1538                 if (errno != ENOENT) {
1539                         condlog(0, "couldn't stat %s : %s", file,
1540                                 strerror(errno));
1541                         return;
1542                 }
1543 #ifdef OOM_ADJUST_MIN
1544                 file = "/proc/self/oom_adj";
1545                 score = OOM_ADJUST_MIN;
1546 #else
1547                 retry = 0;
1548 #endif
1549         } while (retry--);
1550         condlog(0, "couldn't adjust oom score");
1551 }
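
/*
 * Illustration: on kernels providing oom_score_adj this amounts to
 * writing OOM_SCORE_ADJ_MIN (-1000) to /proc/self/oom_score_adj, making
 * the daemon the least likely OOM-killer victim; older kernels fall
 * back to /proc/self/oom_adj instead.
 */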
1552
1553 static int
1554 child (void * param)
1555 {
1556         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1557         pthread_attr_t log_attr, misc_attr;
1558         struct vectors * vecs;
1559         struct multipath * mpp;
1560         int i;
1561         sigset_t set;
1562         int rc, pid_rc;
1563
1564         mlockall(MCL_CURRENT | MCL_FUTURE);
1565
1566         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1567         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1568
1569         if (logsink) {
1570                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1571                 log_thread_start(&log_attr);
1572                 pthread_attr_destroy(&log_attr);
1573         }
1574
1575         running_state = DAEMON_START;
1576
1577         condlog(2, "--------start up--------");
1578         condlog(2, "read " DEFAULT_CONFIGFILE);
1579
1580         if (load_config(DEFAULT_CONFIGFILE))
1581                 exit(1);
1582
1583         if (init_checkers()) {
1584                 condlog(0, "failed to initialize checkers");
1585                 exit(1);
1586         }
1587         if (init_prio()) {
1588                 condlog(0, "failed to initialize prioritizers");
1589                 exit(1);
1590         }
1591
1592         setlogmask(LOG_UPTO(conf->verbosity + 3));
1593
1594         if (conf->max_fds) {
1595                 struct rlimit fd_limit;
1596
1597                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1598                         condlog(0, "can't get open fds limit: %s",
1599                                 strerror(errno));
1600                         fd_limit.rlim_cur = 0;
1601                         fd_limit.rlim_max = 0;
1602                 }
1603                 if (fd_limit.rlim_cur < conf->max_fds) {
1604                         fd_limit.rlim_cur = conf->max_fds;
1605                         if (fd_limit.rlim_max < conf->max_fds)
1606                                 fd_limit.rlim_max = conf->max_fds;
1607                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1608                                 condlog(0, "can't set open fds limit to "
1609                                         "%lu/%lu : %s",
1610                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1611                                         strerror(errno));
1612                         } else {
1613                                 condlog(3, "set open fds limit to %lu/%lu",
1614                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1615                         }
1616                 }
1617
1618         }
1619
1620         vecs = gvecs = init_vecs();
1621         if (!vecs)
1622                 exit(1);
1623
1624         signal_init();
1625         setscheduler();
1626         set_oom_adj();
1627
1628         conf->daemon = 1;
1629         udev_set_sync_support(0);
1630         /*
1631          * Start uevent listener early to catch events
1632          */
1633         if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs))) {
1634                 condlog(0, "failed to create uevent thread: %d", rc);
1635                 exit(1);
1636         }
1637         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1638                 condlog(0, "failed to create cli listener: %d", rc);
1639                 exit(1);
1640         }
1641         /*
1642          * fetch and configure both paths and multipaths
1643          */
1644         running_state = DAEMON_CONFIGURE;
1645
1646         lock(vecs->lock);
1647         if (configure(vecs, 1)) {
1648                 unlock(vecs->lock);
1649                 condlog(0, "failure during configuration");
1650                 exit(1);
1651         }
1652         unlock(vecs->lock);
1653
1654         /*
1655          * start threads
1656          */
1657         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1658                 condlog(0,"failed to create checker loop thread: %d", rc);
1659                 exit(1);
1660         }
1661         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1662                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1663                 exit(1);
1664         }
1665         pthread_attr_destroy(&misc_attr);
1666
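        /*
         * Startup is done: take exit_mutex before declaring DAEMON_RUNNING so
         * that a shutdown request cannot signal exit_cond before we are
         * waiting on it; the main thread then sleeps here until told to exit.
         */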
1667         pthread_mutex_lock(&exit_mutex);
        /* Startup complete, create pidfile */
1669         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1670         /* Ignore errors, we can live without */
1671
1672         running_state = DAEMON_RUNNING;
1673         pthread_cond_wait(&exit_cond, &exit_mutex);
1674         /* Need to block these to avoid deadlocking */
1675         sigemptyset(&set);
1676         sigaddset(&set, SIGTERM);
1677         sigaddset(&set, SIGINT);
1678         pthread_sigmask(SIG_BLOCK, &set, NULL);
1679
1680         /*
1681          * exit path
1682          */
1683         running_state = DAEMON_SHUTDOWN;
1684         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1685         block_signal(SIGHUP, NULL);
1686         lock(vecs->lock);
1687         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1688                 vector_foreach_slot(vecs->mpvec, mpp, i)
1689                         dm_queue_if_no_path(mpp->alias, 0);
1690         remove_maps_and_stop_waiters(vecs);
1691         unlock(vecs->lock);
1692
1693         pthread_cancel(check_thr);
1694         pthread_cancel(uevent_thr);
1695         pthread_cancel(uxlsnr_thr);
1696         pthread_cancel(uevq_thr);
1697
1698         lock(vecs->lock);
1699         free_pathvec(vecs->pathvec, FREE_PATHS);
1700         vecs->pathvec = NULL;
1701         unlock(vecs->lock);
1702         /* Now all the waitevent threads will start rushing in. */
1703         while (vecs->lock.depth > 0) {
1704                 sleep (1); /* This is weak. */
                condlog(3, "Have %d wait event checker threads to de-allocate,"
                        " waiting...", vecs->lock.depth);
1707         }
1708         pthread_mutex_destroy(vecs->lock.mutex);
1709         FREE(vecs->lock.mutex);
1710         vecs->lock.depth = 0;
1711         vecs->lock.mutex = NULL;
1712         FREE(vecs);
1713         vecs = NULL;
1714
1715         cleanup_checkers();
1716         cleanup_prio();
1717
1718         dm_lib_release();
1719         dm_lib_exit();
1720
1721         /* We're done here */
1722         if (!pid_rc) {
1723                 condlog(3, "unlink pidfile");
1724                 unlink(DEFAULT_PIDFILE);
1725         }
1726
1727         condlog(2, "--------shut down-------");
1728
1729         if (logsink)
1730                 log_thread_stop();
1731
1732         /*
1733          * Freeing config must be done after condlog() and dm_lib_exit(),
1734          * because logging functions like dlog() and dm_write_log()
1735          * reference the config.
1736          */
1737         free_config(conf);
1738         conf = NULL;
1739
1740 #ifdef _DEBUG_
1741         dbg_free_final(NULL);
1742 #endif
1743
1744         exit(0);
1745 }
1746
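/*
 * Classic double-fork daemonization: the first fork plus setsid() detaches
 * us from the controlling terminal and process group, the second fork makes
 * sure the daemon can never reacquire a controlling terminal, and stdin,
 * stdout and stderr are redirected to /dev/null.
 */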
1747 static int
1748 daemonize(void)
1749 {
1750         int pid;
1751         int dev_null_fd;
1752
1753         if( (pid = fork()) < 0){
1754                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1755                 return -1;
1756         }
1757         else if (pid != 0)
1758                 return pid;
1759
1760         setsid();
1761
1762         if ( (pid = fork()) < 0)
1763                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1764         else if (pid != 0)
1765                 _exit(0);
1766
1767         if (chdir("/") < 0)
1768                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1769
1770         dev_null_fd = open("/dev/null", O_RDWR);
1771         if (dev_null_fd < 0){
1772                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1773                         strerror(errno));
1774                 _exit(0);
1775         }
1776
1777         close(STDIN_FILENO);
1778         if (dup(dev_null_fd) < 0) {
1779                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1780                         strerror(errno));
1781                 _exit(0);
1782         }
1783         close(STDOUT_FILENO);
1784         if (dup(dev_null_fd) < 0) {
1785                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1786                         strerror(errno));
1787                 _exit(0);
1788         }
1789         close(STDERR_FILENO);
1790         if (dup(dev_null_fd) < 0) {
1791                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1792                         strerror(errno));
1793                 _exit(0);
1794         }
1795         close(dev_null_fd);
1796         daemon_pid = getpid();
1797         return 0;
1798 }
1799
1800 int
1801 main (int argc, char *argv[])
1802 {
1803         extern char *optarg;
1804         extern int optind;
1805         int arg;
1806         int err;
1807
1808         logsink = 1;
1809         running_state = DAEMON_INIT;
1810         dm_init();
1811
1812         if (getuid() != 0) {
1813                 fprintf(stderr, "need to be root\n");
1814                 exit(1);
1815         }
1816
1817         /* make sure we don't lock any path */
1818         if (chdir("/") < 0)
1819                 fprintf(stderr, "can't chdir to root directory : %s\n",
1820                         strerror(errno));
1821         umask(umask(077) | 022);
1822
1823         conf = alloc_config();
1824
1825         if (!conf)
1826                 exit(1);
1827
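        /*
         * Command line: -d stays in the foreground and logs to stderr instead
         * of syslog, -v <level> sets the verbosity, -k runs as an interactive
         * CLI client against an already running daemon; any remaining
         * arguments are joined into a single CLI command further down.
         */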
        while ((arg = getopt(argc, argv, ":dv:k::")) != EOF) {
                switch (arg) {
                case 'd':
                        logsink = 0;
                        //debug=1; /* ### comment me out ### */
                        break;
                case 'v':
                        if (!isdigit(optarg[0]))
                                exit(1);

                        conf->verbosity = atoi(optarg);
                        break;
                case 'k':
                        uxclnt(optarg);
                        exit(0);
                default:
                        ;
                }
        }
1848         if (optind < argc) {
1849                 char cmd[CMDSIZE];
1850                 char * s = cmd;
1851                 char * c = s;
1852
1853                 while (optind < argc) {
1854                         if (strchr(argv[optind], ' '))
1855                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1856                         else
1857                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1858                         optind++;
1859                 }
1860                 c += snprintf(c, s + CMDSIZE - c, "\n");
1861                 uxclnt(s);
1862                 exit(0);
1863         }
1864
1865         if (!logsink)
1866                 err = 0;
1867         else
1868                 err = daemonize();
1869
1870         if (err < 0)
1871                 /* error */
1872                 exit(1);
1873         else if (err > 0)
1874                 /* parent dies */
1875                 exit(0);
1876         else
1877                 /* child lives */
1878                 return (child(NULL));
1879 }
1880
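/*
 * Persistent reservation handling for a path that has (re)appeared: read the
 * registered keys from the device (PR IN, READ KEYS), check whether the map's
 * configured reservation_key is among them, and if so register that key on
 * this path with PR OUT REGISTER AND IGNORE EXISTING KEY so the path is
 * covered by the existing reservation.
 */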
1881 void *  mpath_pr_event_handler_fn (void * pathp )
1882 {
1883         struct multipath * mpp;
1884         int i,j, ret, isFound;
1885         struct path * pp = (struct path *)pathp;
1886         unsigned char *keyp;
1887         uint64_t prkey;
1888         struct prout_param_descriptor *param;
1889         struct prin_resp *resp;
1890
1891         mpp = pp->mpp;
1892
        resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
        if (!resp) {
                condlog(0, "%s: allocation failed for prin response", pp->dev);
                return NULL;
        }
1898
        ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
        if (ret != MPATH_PR_SUCCESS) {
                condlog(0, "%s: pr in read keys service action failed. Error=%d",
                        pp->dev, ret);
                goto out;
        }
1905
        condlog(3, "event pr=%d addlen=%d",
                resp->prin_descriptor.prin_readkeys.prgeneration,
                resp->prin_descriptor.prin_readkeys.additional_length);

        if (resp->prin_descriptor.prin_readkeys.additional_length == 0) {
                condlog(1, "%s: No key found. Device may not be registered.",
                        pp->dev);
                ret = MPATH_PR_SUCCESS;
                goto out;
        }
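        /*
         * The configured reservation_key is stored as 8 big-endian bytes;
         * fold it into a uint64_t for logging and for rebuilding the PROUT
         * parameter list below.
         */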
1915         prkey = 0;
1916         keyp = (unsigned char *)mpp->reservation_key;
1917         for (j = 0; j < 8; ++j) {
1918                 if (j > 0)
1919                         prkey <<= 8;
1920                 prkey |= *keyp;
1921                 ++keyp;
1922         }
        condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);

        isFound = 0;
        for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++) {
                condlog(2, "PR IN READKEYS[%d] reservation key:", i);
                dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i * 8],
                        8, -1);
                if (!memcmp(mpp->reservation_key,
                            &resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8)) {
                        condlog(2, "%s: pr key found in prin readkeys response",
                                mpp->alias);
                        isFound = 1;
                        break;
                }
        }
        if (!isFound) {
                condlog(0, "%s: either the device is not registered or the host"
                        " is not authorised for registration. Skipping path",
                        pp->dev);
                ret = MPATH_PR_OTHER;
                goto out;
        }
1944
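        /*
         * Build the PROUT parameter list: the service action reservation key
         * is written back out in the same big-endian byte order, and the
         * REGISTER AND IGNORE EXISTING KEY service action is used so a stale
         * key already registered through this path does not make the
         * registration fail.
         */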
        param = calloc(1, sizeof(struct prout_param_descriptor));
        if (!param)
                goto out;  /* allocation failed; skip registration on this path */
1947
1948         for (j = 7; j >= 0; --j) {
1949                 param->sa_key[j] = (prkey & 0xff);
1950                 prkey >>= 8;
1951         }
1952         param->num_transportid = 0;
1953
1954         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1955
        ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
        if (ret != MPATH_PR_SUCCESS)
                condlog(0, "%s: Reservation registration failed. Error: %d",
                        pp->dev, ret);
1961         mpp->prflag = 1;
1962
1963         free(param);
1964 out:
1965         free(resp);
1966         return NULL;
1967 }
1968
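/*
 * Entry point for path events on maps with a configured reservation_key: the
 * registration handler above is run in a short-lived thread that is joined
 * immediately, so the call is effectively synchronous; paths on maps without
 * a reservation_key are skipped.
 */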
1969 int mpath_pr_event_handle(struct path *pp)
1970 {
1971         pthread_t thread;
1972         int rc;
1973         pthread_attr_t attr;
1974         struct multipath * mpp;
1975
1976         mpp = pp->mpp;
1977
1978         if (!mpp->reservation_key)
1979                 return -1;
1980
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

        rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
        pthread_attr_destroy(&attr);
        if (rc) {
                condlog(0, "%s: ERROR; return code from pthread_create() is %d",
                        pp->dev, rc);
                return -1;
        }
        pthread_join(thread, NULL);
        return 0;
1992 }
1993