Add missing includes for remember_wwid
multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <mpath_persist.h>
21
22 /*
23  * libcheckers
24  */
25 #include <checkers.h>
26
27 /*
28  * libmultipath
29  */
30 #include <parser.h>
31 #include <vector.h>
32 #include <memory.h>
33 #include <config.h>
34 #include <util.h>
35 #include <hwtable.h>
36 #include <defaults.h>
37 #include <structs.h>
38 #include <blacklist.h>
39 #include <structs_vec.h>
40 #include <dmparser.h>
41 #include <devmapper.h>
42 #include <sysfs.h>
43 #include <dict.h>
44 #include <discovery.h>
45 #include <debug.h>
46 #include <propsel.h>
47 #include <uevent.h>
48 #include <switchgroup.h>
49 #include <print.h>
50 #include <configure.h>
51 #include <prio.h>
52 #include <pgpolicies.h>
54
55 #include "main.h"
56 #include "pidfile.h"
57 #include "uxlsnr.h"
58 #include "uxclnt.h"
59 #include "cli.h"
60 #include "cli_handlers.h"
61 #include "lock.h"
62 #include "waiter.h"
63 #include "wwids.h"
64
65 #define FILE_NAME_SIZE 256
66 #define CMDSIZE 160
67
68 #define LOG_MSG(a, b) \
69 do { \
70         if (pp->offline) \
71                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
72         else if (strlen(b)) \
73                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
74 } while(0)
75
76 struct mpath_event_param
77 {
78         char * devname;
79         struct multipath *mpp;
80 };
81
82 unsigned int mpath_mx_alloc_len;
83
84 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
85 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
86
87 int logsink;
88 enum daemon_status running_state;
89 pid_t daemon_pid;
90
91 /*
92  * global copy of vecs for use in sig handlers
93  */
94 struct vectors * gvecs;
95
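/*
 * Decide whether a map should switch path groups: optionally refresh
 * path priorities, recompute the best path group and return 1 if it
 * differs from the group the kernel will use next.
 */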
96 static int
97 need_switch_pathgroup (struct multipath * mpp, int refresh)
98 {
99         struct pathgroup * pgp;
100         struct path * pp;
101         unsigned int i, j;
102
103         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
104                 return 0;
105
106         /*
107          * Refresh path priority values
108          */
109         if (refresh)
110                 vector_foreach_slot (mpp->pg, pgp, i)
111                         vector_foreach_slot (pgp->paths, pp, j)
112                                 pathinfo(pp, conf->hwtable, DI_PRIO);
113
114         mpp->bestpg = select_path_group(mpp);
115
116         if (mpp->bestpg != mpp->nextpg)
117                 return 1;
118
119         return 0;
120 }
121
122 static void
123 switch_pathgroup (struct multipath * mpp)
124 {
125         mpp->stat_switchgroup++;
126         dm_switchgroup(mpp->alias, mpp->bestpg);
127         condlog(2, "%s: switch to path group #%i",
128                  mpp->alias, mpp->bestpg);
129 }
130
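/*
 * Remove maps from the running configuration that are not part of the
 * newly coalesced set (nmpv).  If a map cannot be flushed (e.g. because
 * it is still open), keep it by moving it into the new vector.  Retained
 * maps are reassigned via dm_reassign() when reassign_maps is enabled.
 */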
131 static int
132 coalesce_maps(struct vectors *vecs, vector nmpv)
133 {
134         struct multipath * ompp;
135         vector ompv = vecs->mpvec;
136         unsigned int i;
137         int j;
138
139         vector_foreach_slot (ompv, ompp, i) {
140                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
141                         /*
142                          * remove all current maps not allowed by the
143                          * current configuration
144                          */
145                         if (dm_flush_map(ompp->alias)) {
146                                 condlog(0, "%s: unable to flush devmap",
147                                         ompp->alias);
148                                 /*
149                                  * may be just because the device is open
150                                  */
151                                 if (!vector_alloc_slot(nmpv))
152                                         return 1;
153
154                                 vector_set_slot(nmpv, ompp);
155                                 setup_multipath(vecs, ompp);
156
157                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
158                                         vector_del_slot(ompv, j);
159
160                                 continue;
161                         }
162                         else {
163                                 dm_lib_release();
164                                 condlog(2, "%s devmap removed", ompp->alias);
165                         }
166                 } else if (conf->reassign_maps) {
167                         condlog(3, "%s: Reassign existing device-mapper"
168                                 " devices", ompp->alias);
169                         dm_reassign(ompp->alias);
170                 }
171         }
172         return 0;
173 }
174
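/*
 * Push checker-determined path states down to device-mapper: reinstate
 * paths the kernel marked failed but the checker sees as usable, and
 * fail paths the kernel has active but the checker sees as down.
 */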
175 void
176 sync_map_state(struct multipath *mpp)
177 {
178         struct pathgroup *pgp;
179         struct path *pp;
180         unsigned int i, j;
181
182         if (!mpp->pg)
183                 return;
184
185         vector_foreach_slot (mpp->pg, pgp, i){
186                 vector_foreach_slot (pgp->paths, pp, j){
187                         if (pp->state == PATH_UNCHECKED || 
188                             pp->state == PATH_WILD)
189                                 continue;
190                         if ((pp->dmstate == PSTATE_FAILED ||
191                              pp->dmstate == PSTATE_UNDEF) &&
192                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
193                                 dm_reinstate_path(mpp->alias, pp->dev_t);
194                         else if ((pp->dmstate == PSTATE_ACTIVE ||
195                                   pp->dmstate == PSTATE_UNDEF) &&
196                                  (pp->state == PATH_DOWN ||
197                                   pp->state == PATH_SHAKY))
198                                 dm_fail_path(mpp->alias, pp->dev_t);
199                 }
200         }
201 }
202
203 static void
204 sync_maps_state(vector mpvec)
205 {
206         unsigned int i;
207         struct multipath *mpp;
208
209         vector_foreach_slot (mpvec, mpp, i)
210                 sync_map_state(mpp);
211 }
212
213 static int
214 flush_map(struct multipath * mpp, struct vectors * vecs)
215 {
216         /*
217          * clear references to this map before flushing so we can ignore
218          * the spurious uevent we may generate with the dm_flush_map call below
219          */
220         if (dm_flush_map(mpp->alias)) {
221                 /*
222                  * May not really be an error -- if the map was already flushed
223                  * from the device mapper by dmsetup(8) for instance.
224                  */
225                 condlog(0, "%s: can't flush", mpp->alias);
226                 return 1;
227         }
228         else {
229                 dm_lib_release();
230                 condlog(2, "%s: devmap removed", mpp->alias);
231         }
232
233         orphan_paths(vecs->pathvec, mpp);
234         remove_map_and_stop_waiter(mpp, vecs, 1);
235
236         return 0;
237 }
238
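/*
 * Handle a "change" uevent for a dm device: resolve the map name from
 * DM_NAME (or from major:minor if DM_NAME is missing) and hand off to
 * ev_add_map().
 */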
239 static int
240 uev_add_map (struct uevent * uev, struct vectors * vecs)
241 {
242         char *alias;
243         int major = -1, minor = -1, rc;
244
245         condlog(3, "%s: add map (uevent)", uev->kernel);
246         alias = uevent_get_dm_name(uev);
247         if (!alias) {
248                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
249                 major = uevent_get_major(uev);
250                 minor = uevent_get_minor(uev);
251                 alias = dm_mapname(major, minor);
252                 if (!alias) {
253                         condlog(2, "%s: mapname not found for %d:%d",
254                                 uev->kernel, major, minor);
255                         return 1;
256                 }
257         }
258         rc = ev_add_map(uev->kernel, alias, vecs);
259         FREE(alias);
260         return rc;
261 }
262
263 int
264 ev_add_map (char * dev, char * alias, struct vectors * vecs)
265 {
266         char * refwwid;
267         struct multipath * mpp;
268         int map_present;
269         int r = 1;
270
271         map_present = dm_map_present(alias);
272
273         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
274                 condlog(4, "%s: not a multipath map", alias);
275                 return 0;
276         }
277
278         mpp = find_mp_by_alias(vecs->mpvec, alias);
279
280         if (mpp) {
281                 /*
282                  * Not really an error -- we generate our own uevent
283                  * if we create a multipath mapped device as a result
284                  * of uev_add_path
285                  */
286                 if (conf->reassign_maps) {
287                         condlog(3, "%s: Reassign existing device-mapper devices",
288                                 alias);
289                         dm_reassign(alias);
290                 }
291                 return 0;
292         }
293         condlog(2, "%s: adding map", alias);
294
295         /*
296          * now we can register the map
297          */
298         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
299                 sync_map_state(mpp);
300                 condlog(2, "%s: devmap %s registered", alias, dev);
301                 return 0;
302         }
303         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
304
305         if (refwwid) {
306                 r = coalesce_paths(vecs, NULL, refwwid, 0);
307                 dm_lib_release();
308         }
309
310         if (!r)
311                 condlog(2, "%s: devmap %s added", alias, dev);
312         else if (r == 2)
313                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
314         else
315                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
316
317         FREE(refwwid);
318         return r;
319 }
320
321 static int
322 uev_remove_map (struct uevent * uev, struct vectors * vecs)
323 {
324         char *alias;
325         int minor;
326         struct multipath *mpp;
327
328         condlog(2, "%s: remove map (uevent)", uev->kernel);
329         alias = uevent_get_dm_name(uev);
330         if (!alias) {
331                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
332                 return 0;
333         }
334         minor = uevent_get_minor(uev);
335         mpp = find_mp_by_minor(vecs->mpvec, minor);
336
337         if (!mpp) {
338                 condlog(2, "%s: devmap not registered, can't remove",
339                         uev->kernel);
340                 goto out;
341         }
342         if (strcmp(mpp->alias, alias)) {
343                 condlog(2, "%s: alias mismatch (map %s, event %s)",
344                         uev->kernel, mpp->alias, alias);
345                 goto out;
346         }
347
348         orphan_paths(vecs->pathvec, mpp);
349         remove_map_and_stop_waiter(mpp, vecs, 1);
350 out:
351         FREE(alias);
352         return 0;
353 }
354
355 int
356 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
357 {
358         struct multipath * mpp;
359
360         mpp = find_mp_by_minor(vecs->mpvec, minor);
361
362         if (!mpp) {
363                 condlog(2, "%s: devmap not registered, can't remove",
364                         devname);
365                 return 0;
366         }
367         if (strcmp(mpp->alias, alias)) {
368                 condlog(2, "%s: alias mismatch (map %s, event %s)",
369                         devname, mpp->alias, alias);
370                 return 0;
371         }
372         return flush_map(mpp, vecs);
373 }
374
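/*
 * Handle an "add" uevent for a path device: reject relative device
 * names, reuse or reinitialize an existing pathvec entry if present,
 * otherwise discover and store a new path, then call ev_add_path().
 */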
375 static int
376 uev_add_path (struct uevent *uev, struct vectors * vecs)
377 {
378         struct path *pp;
379         int ret, i;
380
381         condlog(2, "%s: add path (uevent)", uev->kernel);
382         if (strstr(uev->kernel, "..") != NULL) {
383                 /*
384                  * Don't allow relative device names in the pathvec
385                  */
386                 condlog(0, "%s: path name is invalid", uev->kernel);
387                 return 1;
388         }
389
390         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
391         if (pp) {
392                 condlog(0, "%s: spurious uevent, path already in pathvec",
393                         uev->kernel);
394                 if (pp->mpp)
395                         return 0;
396                 if (!strlen(pp->wwid)) {
397                         udev_device_unref(pp->udev);
398                         pp->udev = udev_device_ref(uev->udev);
399                         ret = pathinfo(pp, conf->hwtable,
400                                        DI_ALL | DI_BLACKLIST);
401                         if (ret == 2) {
402                                 i = find_slot(vecs->pathvec, (void *)pp);
403                                 if (i != -1)
404                                         vector_del_slot(vecs->pathvec, i);
405                                 free_path(pp);
406                                 return 0;
407                         } else if (ret == 1) {
408                                 condlog(0, "%s: failed to reinitialize path",
409                                         uev->kernel);
410                                 return 1;
411                         }
412                 }
413         } else {
414                 /*
415                  * get path vital state
416                  */
417                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
418                                      uev->udev, DI_ALL, &pp);
419                 if (!pp) {
420                         if (ret == 2)
421                                 return 0;
422                         condlog(0, "%s: failed to store path info",
423                                 uev->kernel);
424                         return 1;
425                 }
426                 pp->checkint = conf->checkint;
427         }
428
429         return ev_add_path(pp, vecs);
430 }
431
432 /*
433  * returns:
434  * 0: added
435  * 1: error
436  */
437 int
438 ev_add_path (struct path * pp, struct vectors * vecs)
439 {
440         struct multipath * mpp;
441         char empty_buff[WWID_SIZE] = {0};
442         char params[PARAMS_SIZE] = {0};
443         int retries = 3;
444         int start_waiter = 0;
445
446         /*
447          * need path UID to go any further
448          */
449         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
450                 condlog(0, "%s: failed to get path uid", pp->dev);
451                 goto fail; /* leave path added to pathvec */
452         }
453         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
454 rescan:
455         if (mpp) {
456                 if ((!pp->size) || (mpp->size != pp->size)) {
457                         if (!pp->size)
458                                 condlog(0, "%s: failed to add new path %s, "
459                                         "device size is 0",
460                                         mpp->alias, pp->dev);
461                         else
462                                 condlog(0, "%s: failed to add new path %s, "
463                                         "device size mismatch",
464                                         mpp->alias, pp->dev);
465                         int i = find_slot(vecs->pathvec, (void *)pp);
466                         if (i != -1)
467                                 vector_del_slot(vecs->pathvec, i);
468                         free_path(pp);
469                         return 1;
470                 }
471
472                 condlog(4,"%s: adopting all paths for path %s",
473                         mpp->alias, pp->dev);
474                 if (adopt_paths(vecs->pathvec, mpp, 1))
475                         goto fail; /* leave path added to pathvec */
476
477                 verify_paths(mpp, vecs, NULL);
478                 mpp->flush_on_last_del = FLUSH_UNDEF;
479                 mpp->action = ACT_RELOAD;
480         }
481         else {
482                 if (!pp->size) {
483                         condlog(0, "%s: failed to create new map,"
484                                 " device size is 0 ", pp->dev);
485                         int i = find_slot(vecs->pathvec, (void *)pp);
486                         if (i != -1)
487                                 vector_del_slot(vecs->pathvec, i);
488                         free_path(pp);
489                         return 1;
490                 }
491
492                 condlog(4,"%s: creating new map", pp->dev);
493                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
494                         mpp->action = ACT_CREATE;
495                         /*
496                          * We don't depend on ACT_CREATE, as domap will
497                          * set it to ACT_NOTHING when complete.
498                          */
499                         start_waiter = 1;
500                 }
501                 else
502                         goto fail; /* leave path added to pathvec */
503         }
504
505         /* persistent reservation check */
506         mpath_pr_event_handle(pp);
507
508         /*
509          * push the map to the device-mapper
510          */
511         if (setup_map(mpp, params, PARAMS_SIZE)) {
512                 condlog(0, "%s: failed to setup map for addition of new "
513                         "path %s", mpp->alias, pp->dev);
514                 goto fail_map;
515         }
516         /*
517          * reload the map for the multipath mapped device
518          */
519         if (domap(mpp, params) <= 0) {
520                 condlog(0, "%s: failed in domap for addition of new "
521                         "path %s", mpp->alias, pp->dev);
522                 /*
523                  * deal with asynchronous uevents :((
524                  */
525                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
526                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
527                         sleep(1);
528                         update_mpp_paths(mpp, vecs->pathvec);
529                         goto rescan;
530                 }
531                 else if (mpp->action == ACT_RELOAD)
532                         condlog(0, "%s: giving up reload", mpp->alias);
533                 else
534                         goto fail_map;
535         }
536         dm_lib_release();
537
538         /*
539          * update our state from kernel regardless of create or reload
540          */
541         if (setup_multipath(vecs, mpp))
542                 goto fail; /* if setup_multipath fails, it removes the map */
543
544         sync_map_state(mpp);
545
546         if ((mpp->action == ACT_CREATE ||
547              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
548             start_waiter_thread(mpp, vecs))
549                 goto fail_map;
550
551         if (retries >= 0) {
552                 condlog(2, "%s [%s]: path added to devmap %s",
553                         pp->dev, pp->dev_t, mpp->alias);
554                 return 0;
555         }
556         else
557                 return 1;
558
559 fail_map:
560         remove_map(mpp, vecs, 1);
561 fail:
562         orphan_path(pp);
563         return 1;
564 }
565
566 static int
567 uev_remove_path (struct uevent *uev, struct vectors * vecs)
568 {
569         struct path *pp;
570
571         condlog(2, "%s: remove path (uevent)", uev->kernel);
572         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
573
574         if (!pp) {
575                 /* Not an error; path might have been purged earlier */
576                 condlog(0, "%s: path already removed", uev->kernel);
577                 return 0;
578         }
579
580         return ev_remove_path(pp, vecs);
581 }
582
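/*
 * Remove a path from its map.  If it was the last path, try to flush
 * the whole map (honoring flush_on_last_del); otherwise reload the map
 * without it.  On success the path is dropped from the pathvec and freed.
 */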
583 int
584 ev_remove_path (struct path *pp, struct vectors * vecs)
585 {
586         struct multipath * mpp;
587         int i, retval = 0;
588         char params[PARAMS_SIZE] = {0};
589
590         /*
591          * avoid referring to the map of an orphaned path
592          */
593         if ((mpp = pp->mpp)) {
594                 /*
595                  * transform the mp->pg vector of vectors of paths
596                  * into a mp->params string to feed the device-mapper
597                  */
598                 if (update_mpp_paths(mpp, vecs->pathvec)) {
599                         condlog(0, "%s: failed to update paths",
600                                 mpp->alias);
601                         goto fail;
602                 }
603                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
604                         vector_del_slot(mpp->paths, i);
605
606                 /*
607                  * remove the map IFF removing the last path
608                  */
609                 if (VECTOR_SIZE(mpp->paths) == 0) {
610                         char alias[WWID_SIZE];
611
612                         /*
613                          * flush_map will fail if the device is open
614                          */
615                         strncpy(alias, mpp->alias, WWID_SIZE);
616                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
617                                 condlog(2, "%s: last path deleted, disabling queueing", mpp->alias);
618                                 mpp->retry_tick = 0;
619                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
620                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
621                                 dm_queue_if_no_path(mpp->alias, 0);
622                         }
623                         if (!flush_map(mpp, vecs)) {
624                                 condlog(2, "%s: removed map after"
625                                         " removing all paths",
626                                         alias);
627                                 retval = 0;
628                                 goto out;
629                         }
630                         /*
631                          * Not an error, continue
632                          */
633                 }
634
635                 if (setup_map(mpp, params, PARAMS_SIZE)) {
636                         condlog(0, "%s: failed to setup map for"
637                                 " removal of path %s", mpp->alias, pp->dev);
638                         goto fail;
639                 }
640                 /*
641                  * reload the map
642                  */
643                 mpp->action = ACT_RELOAD;
644                 if (domap(mpp, params) <= 0) {
645                         condlog(0, "%s: failed in domap for "
646                                 "removal of path %s",
647                                 mpp->alias, pp->dev);
648                         retval = 1;
649                 } else {
650                         /*
651                          * update our state from kernel
652                          */
653                         if (setup_multipath(vecs, mpp)) {
654                                 goto fail;
655                         }
656                         sync_map_state(mpp);
657
658                         condlog(2, "%s [%s]: path removed from map %s",
659                                 pp->dev, pp->dev_t, mpp->alias);
660                 }
661         }
662
663 out:
664         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
665                 vector_del_slot(vecs->pathvec, i);
666
667         free_path(pp);
668
669         return retval;
670
671 fail:
672         remove_map_and_stop_waiter(mpp, vecs, 1);
673         return 1;
674 }
675
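/*
 * Handle a "change" uevent for a path device.  Currently only a changed
 * read-only attribute is acted upon, by reloading the owning map.
 */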
676 static int
677 uev_update_path (struct uevent *uev, struct vectors * vecs)
678 {
679         int ro, retval = 0;
680
681         ro = uevent_get_disk_ro(uev);
682
683         if (ro >= 0) {
684                 struct path * pp;
685
686                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
687                         uev->kernel, ro);
688                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
689                 if (!pp) {
690                         condlog(0, "%s: spurious uevent, path not found",
691                                 uev->kernel);
692                         return 1;
693                 }
694                 if (pp->mpp) {
695                         retval = reload_map(vecs, pp->mpp, 0);
696
697                         condlog(2, "%s: map %s reloaded (retval %d)",
698                                 uev->kernel, pp->mpp->alias, retval);
699                 }
700
701         }
702
703         return retval;
704 }
705
706 static int
707 map_discovery (struct vectors * vecs)
708 {
709         struct multipath * mpp;
710         unsigned int i;
711
712         if (dm_get_maps(vecs->mpvec))
713                 return 1;
714
715         vector_foreach_slot (vecs->mpvec, mpp, i)
716                 if (setup_multipath(vecs, mpp))
717                         return 1;
718
719         return 0;
720 }
721
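/*
 * Callback for commands received on the CLI unix socket: run parse_cmd()
 * under the vecs lock and synthesize an "ok"/"fail" reply if the handler
 * did not provide one.
 */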
722 int
723 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
724 {
725         struct vectors * vecs;
726         int r;
727
728         *reply = NULL;
729         *len = 0;
730         vecs = (struct vectors *)trigger_data;
731
732         pthread_cleanup_push(cleanup_lock, &vecs->lock);
733         lock(vecs->lock);
734         pthread_testcancel();
735
736         r = parse_cmd(str, reply, len, vecs);
737
738         if (r > 0) {
739                 *reply = STRDUP("fail\n");
740                 *len = strlen(*reply) + 1;
741                 r = 1;
742         }
743         else if (!r && *len == 0) {
744                 *reply = STRDUP("ok\n");
745                 *len = strlen(*reply) + 1;
746                 r = 0;
747         }
748         /* else if (r < 0) leave *reply alone */
749
750         lock_cleanup_pop(vecs->lock);
751         return r;
752 }
753
754 static int
755 uev_discard(char * devpath)
756 {
757         char *tmp;
758         char a[11], b[11];
759
760         /*
761          * keep only block devices, discard partitions
762          */
763         tmp = strstr(devpath, "/block/");
764         if (tmp == NULL){
765                 condlog(4, "no /block/ in '%s'", devpath);
766                 return 1;
767         }
768         if (sscanf(tmp, "/block/%10s", a) != 1 ||
769             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
770                 condlog(4, "discard event on %s", devpath);
771                 return 1;
772         }
773         return 0;
774 }
775
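/*
 * Central uevent dispatcher: filters out uninteresting devices and,
 * under the vecs lock, routes dm-* map events and path add/remove/change
 * events to the handlers above.
 */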
776 int
777 uev_trigger (struct uevent * uev, void * trigger_data)
778 {
779         int r = 0;
780         struct vectors * vecs;
781
782         vecs = (struct vectors *)trigger_data;
783
784         if (uev_discard(uev->devpath))
785                 return 0;
786
787         pthread_cleanup_push(cleanup_lock, &vecs->lock);
788         lock(vecs->lock);
789         pthread_testcancel();
790
791         /*
792          * device map event
793          * Add events are ignored here as the tables
794          * are not fully initialised then.
795          */
796         if (!strncmp(uev->kernel, "dm-", 3)) {
797                 if (!strncmp(uev->action, "change", 6)) {
798                         r = uev_add_map(uev, vecs);
799                         goto out;
800                 }
801                 if (!strncmp(uev->action, "remove", 6)) {
802                         r = uev_remove_map(uev, vecs);
803                         goto out;
804                 }
805                 goto out;
806         }
807
808         /*
809          * path add/remove event
810          */
811         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
812                            uev->kernel) > 0)
813                 goto out;
814
815         if (!strncmp(uev->action, "add", 3)) {
816                 r = uev_add_path(uev, vecs);
817                 goto out;
818         }
819         if (!strncmp(uev->action, "remove", 6)) {
820                 r = uev_remove_path(uev, vecs);
821                 goto out;
822         }
823         if (!strncmp(uev->action, "change", 6)) {
824                 r = uev_update_path(uev, vecs);
825                 goto out;
826         }
827
828 out:
829         lock_cleanup_pop(vecs->lock);
830         return r;
831 }
832
833 static void *
834 ueventloop (void * ap)
835 {
836         block_signal(SIGUSR1, NULL);
837         block_signal(SIGHUP, NULL);
838
839         if (uevent_listen())
840                 condlog(0, "error starting uevent listener");
841
842         return NULL;
843 }
844
845 static void *
846 uevqloop (void * ap)
847 {
848         block_signal(SIGUSR1, NULL);
849         block_signal(SIGHUP, NULL);
850
851         if (uevent_dispatch(&uev_trigger, ap))
852                 condlog(0, "error starting uevent dispatcher");
853
854         return NULL;
855 }
856 static void *
857 uxlsnrloop (void * ap)
858 {
859         block_signal(SIGUSR1, NULL);
860         block_signal(SIGHUP, NULL);
861
862         if (cli_init())
863                 return NULL;
864
865         set_handler_callback(LIST+PATHS, cli_list_paths);
866         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
867         set_handler_callback(LIST+MAPS, cli_list_maps);
868         set_handler_callback(LIST+STATUS, cli_list_status);
869         set_handler_callback(LIST+DAEMON, cli_list_daemon);
870         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
871         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
872         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
873         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
874         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
875         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
876         set_handler_callback(LIST+CONFIG, cli_list_config);
877         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
878         set_handler_callback(LIST+DEVICES, cli_list_devices);
879         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
880         set_handler_callback(ADD+PATH, cli_add_path);
881         set_handler_callback(DEL+PATH, cli_del_path);
882         set_handler_callback(ADD+MAP, cli_add_map);
883         set_handler_callback(DEL+MAP, cli_del_map);
884         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
885         set_handler_callback(RECONFIGURE, cli_reconfigure);
886         set_handler_callback(SUSPEND+MAP, cli_suspend);
887         set_handler_callback(RESUME+MAP, cli_resume);
888         set_handler_callback(RESIZE+MAP, cli_resize);
889         set_handler_callback(RELOAD+MAP, cli_reload);
890         set_handler_callback(RESET+MAP, cli_reassign);
891         set_handler_callback(REINSTATE+PATH, cli_reinstate);
892         set_handler_callback(FAIL+PATH, cli_fail);
893         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
894         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
895         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
896         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
897         set_handler_callback(QUIT, cli_quit);
898         set_handler_callback(SHUTDOWN, cli_shutdown);
899         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
900         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
901         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
902         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
903         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
904
905         umask(077);
906         uxsock_listen(&uxsock_trigger, ap);
907
908         return NULL;
909 }
910
911 int
912 exit_daemon (int status)
913 {
914         if (status != 0)
915                 fprintf(stderr, "bad exit status. see daemon.log\n");
916
917         if (running_state != DAEMON_SHUTDOWN) {
918                 pthread_mutex_lock(&exit_mutex);
919                 pthread_cond_signal(&exit_cond);
920                 pthread_mutex_unlock(&exit_mutex);
921         }
922         return status;
923 }
924
925 const char *
926 daemon_status(void)
927 {
928         switch (running_state) {
929         case DAEMON_INIT:
930                 return "init";
931         case DAEMON_START:
932                 return "startup";
933         case DAEMON_CONFIGURE:
934                 return "configure";
935         case DAEMON_RUNNING:
936                 return "running";
937         case DAEMON_SHUTDOWN:
938                 return "shutdown";
939         }
940         return NULL;
941 }
942
943 static void
944 fail_path (struct path * pp, int del_active)
945 {
946         if (!pp->mpp)
947                 return;
948
949         condlog(2, "checker failed path %s in map %s",
950                  pp->dev_t, pp->mpp->alias);
951
952         dm_fail_path(pp->mpp->alias, pp->dev_t);
953         if (del_active)
954                 update_queue_mode_del_path(pp->mpp);
955 }
956
957 /*
958  * caller must have locked the path list before calling that function
959  */
960 static void
961 reinstate_path (struct path * pp, int add_active)
962 {
963         if (!pp->mpp)
964                 return;
965
966         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
967                 condlog(0, "%s: reinstate failed", pp->dev_t);
968         else {
969                 condlog(2, "%s: reinstated", pp->dev_t);
970                 if (add_active)
971                         update_queue_mode_add_path(pp->mpp);
972         }
973 }
974
975 static void
976 enable_group(struct path * pp)
977 {
978         struct pathgroup * pgp;
979
980         /*
981          * if the path was added through uev_add_path, pgindex can be unset;
982          * the next update of the multipath strings, upon the map reload
983          * event, will set it.
984          *
985          * we can safely return here; map reload enables all path groups.
986          */
987         if (!pp->mpp->pg || !pp->pgindex)
988                 return;
989
990         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
991
992         if (pgp->status == PGSTATE_DISABLED) {
993                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
994                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
995         }
996 }
997
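/*
 * Drop multipath entries whose device-mapper maps no longer exist.
 */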
998 static void
999 mpvec_garbage_collector (struct vectors * vecs)
1000 {
1001         struct multipath * mpp;
1002         unsigned int i;
1003
1004         if (!vecs->mpvec)
1005                 return;
1006
1007         vector_foreach_slot (vecs->mpvec, mpp, i) {
1008                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1009                         condlog(2, "%s: remove dead map", mpp->alias);
1010                         remove_map_and_stop_waiter(mpp, vecs, 1);
1011                         i--;
1012                 }
1013         }
1014 }
1015
1016 /* This is called after a path has started working again. If the multipath
1017  * device for this path uses the followover failback type, and this is the
1018  * best pathgroup, and this is the first path in the pathgroup to come back
1019  * up, then switch to this pathgroup */
1020 static int
1021 followover_should_failback(struct path * pp)
1022 {
1023         struct pathgroup * pgp;
1024         struct path *pp1;
1025         int i;
1026
1027         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1028             !pp->mpp->pg || !pp->pgindex ||
1029             pp->pgindex != pp->mpp->bestpg)
1030                 return 0;
1031
1032         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1033         vector_foreach_slot(pgp->paths, pp1, i) {
1034                 if (pp1 == pp)
1035                         continue;
1036                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1037                         return 0;
1038         }
1039         return 1;
1040 }
1041
1042 static void
1043 defered_failback_tick (vector mpvec)
1044 {
1045         struct multipath * mpp;
1046         unsigned int i;
1047
1048         vector_foreach_slot (mpvec, mpp, i) {
1049                 /*
1050                  * deferred failback timer counting down
1051                  */
1052                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1053                         mpp->failback_tick--;
1054
1055                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1056                                 switch_pathgroup(mpp);
1057                 }
1058         }
1059 }
1060
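/*
 * Count down the no_path_retry timer of queueing maps and disable
 * queueing once it expires.
 */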
1061 static void
1062 retry_count_tick(vector mpvec)
1063 {
1064         struct multipath *mpp;
1065         unsigned int i;
1066
1067         vector_foreach_slot (mpvec, mpp, i) {
1068                 if (mpp->retry_tick) {
1069                         mpp->stat_total_queueing_time++;
1070                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1071                         if(--mpp->retry_tick == 0) {
1072                                 dm_queue_if_no_path(mpp->alias, 0);
1073                                 condlog(2, "%s: Disable queueing", mpp->alias);
1074                         }
1075                 }
1076         }
1077 }
1078
1079 int update_prio(struct path *pp, int refresh_all)
1080 {
1081         int oldpriority;
1082         struct path *pp1;
1083         struct pathgroup * pgp;
1084         int i, j, changed = 0;
1085
1086         if (refresh_all) {
1087                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1088                         vector_foreach_slot (pgp->paths, pp1, j) {
1089                                 oldpriority = pp1->priority;
1090                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1091                                 if (pp1->priority != oldpriority)
1092                                         changed = 1;
1093                         }
1094                 }
1095                 return changed;
1096         }
1097         oldpriority = pp->priority;
1098         pathinfo(pp, conf->hwtable, DI_PRIO);
1099
1100         if (pp->priority == oldpriority)
1101                 return 0;
1102         return 1;
1103 }
1104
1105 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1106 {
1107         if (reload_map(vecs, mpp, refresh))
1108                 return 1;
1109
1110         dm_lib_release();
1111         if (setup_multipath(vecs, mpp) != 0)
1112                 return 1;
1113         sync_map_state(mpp);
1114
1115         return 0;
1116 }
1117
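/*
 * Run the path checker on a single path, synchronize the result with
 * the kernel state (fail/reinstate), adjust the checking interval, and
 * schedule or trigger failback and path group switches as needed.
 */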
1118 void
1119 check_path (struct vectors * vecs, struct path * pp)
1120 {
1121         int newstate;
1122         int new_path_up = 0;
1123         int chkr_new_path_up = 0;
1124         int oldchkrstate = pp->chkrstate;
1125
1126         if (!pp->mpp)
1127                 return;
1128
1129         if (pp->tick && --pp->tick)
1130                 return; /* don't check this path yet */
1131
1132         /*
1133          * provision a next check soonest,
1134          * in case we exit abnormally from here
1135          */
1136         pp->tick = conf->checkint;
1137
1138         newstate = path_offline(pp);
1139         if (newstate == PATH_UP)
1140                 newstate = get_state(pp, 1);
1141
1142         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1143                 condlog(2, "%s: unusable path", pp->dev);
1144                 pathinfo(pp, conf->hwtable, 0);
1145                 return;
1146         }
1147         /*
1148          * Async IO in flight. Keep the previous path state
1149          * and reschedule as soon as possible
1150          */
1151         if (newstate == PATH_PENDING) {
1152                 pp->tick = 1;
1153                 return;
1154         }
1155         /*
1156          * Synchronize with kernel state
1157          */
1158         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1159                 condlog(1, "%s: Could not synchronize with kernel state",
1160                         pp->dev);
1161                 pp->dmstate = PSTATE_UNDEF;
1162         }
1163         pp->chkrstate = newstate;
1164         if (newstate != pp->state) {
1165                 int oldstate = pp->state;
1166                 pp->state = newstate;
1167                 LOG_MSG(1, checker_message(&pp->checker));
1168
1169                 /*
1170                  * upon state change, reset the checkint
1171                  * to the shortest delay
1172                  */
1173                 pp->checkint = conf->checkint;
1174
1175                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1176                         /*
1177                          * proactively fail path in the DM
1178                          */
1179                         if (oldstate == PATH_UP ||
1180                             oldstate == PATH_GHOST)
1181                                 fail_path(pp, 1);
1182                         else
1183                                 fail_path(pp, 0);
1184
1185                         /*
1186                          * cancel scheduled failback
1187                          */
1188                         pp->mpp->failback_tick = 0;
1189
1190                         pp->mpp->stat_path_failures++;
1191                         return;
1192                 }
1193
1194                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1195                         if (pp->mpp && pp->mpp->prflag) {
1196                                 /*
1197                                  * Check Persistent Reservation.
1198                                  */
1199                                 condlog(2, "%s: checking persistent reservation "
1200                                         "registration", pp->dev);
1201                                 mpath_pr_event_handle(pp);
1202                         }
1203                 }
1204
1205                 /*
1206                  * reinstate this path
1207                  */
1208                 if (oldstate != PATH_UP &&
1209                     oldstate != PATH_GHOST)
1210                         reinstate_path(pp, 1);
1211                 else
1212                         reinstate_path(pp, 0);
1213
1214                 new_path_up = 1;
1215
1216                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1217                         chkr_new_path_up = 1;
1218
1219                 /*
1220                  * if at least one path is up in a group, and
1221                  * the group is disabled, re-enable it
1222                  */
1223                 if (newstate == PATH_UP)
1224                         enable_group(pp);
1225         }
1226         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1227                 if (pp->dmstate == PSTATE_FAILED ||
1228                     pp->dmstate == PSTATE_UNDEF) {
1229                         /* Clear IO errors */
1230                         reinstate_path(pp, 0);
1231                 } else {
1232                         LOG_MSG(4, checker_message(&pp->checker));
1233                         if (pp->checkint != conf->max_checkint) {
1234                                 /*
1235                                  * double the next check delay.
1236                                  * max at conf->max_checkint
1237                                  */
1238                                 if (pp->checkint < (conf->max_checkint / 2))
1239                                         pp->checkint = 2 * pp->checkint;
1240                                 else
1241                                         pp->checkint = conf->max_checkint;
1242
1243                                 condlog(4, "%s: delay next check %is",
1244                                         pp->dev_t, pp->checkint);
1245                         }
1246                         pp->tick = pp->checkint;
1247                 }
1248         }
1249         else if (newstate == PATH_DOWN) {
1250                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1251                         LOG_MSG(3, checker_message(&pp->checker));
1252                 else
1253                         LOG_MSG(2, checker_message(&pp->checker));
1254         }
1255
1256         pp->state = newstate;
1257
1258         /*
1259          * path prio refreshing
1260          */
1261         condlog(4, "path prio refresh");
1262
1263         if (update_prio(pp, new_path_up) &&
1264             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1265              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1266                 update_path_groups(pp->mpp, vecs, !new_path_up);
1267         else if (need_switch_pathgroup(pp->mpp, 0)) {
1268                 if (pp->mpp->pgfailback > 0 &&
1269                     (new_path_up || pp->mpp->failback_tick <= 0))
1270                         pp->mpp->failback_tick =
1271                                 pp->mpp->pgfailback + 1;
1272                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1273                          (chkr_new_path_up && followover_should_failback(pp)))
1274                         switch_pathgroup(pp->mpp);
1275         }
1276 }
1277
1278 static void *
1279 checkerloop (void *ap)
1280 {
1281         struct vectors *vecs;
1282         struct path *pp;
1283         int count = 0;
1284         unsigned int i;
1285         sigset_t old;
1286
1287         mlockall(MCL_CURRENT | MCL_FUTURE);
1288         vecs = (struct vectors *)ap;
1289         condlog(2, "path checkers start up");
1290
1291         /*
1292          * init the path check interval
1293          */
1294         vector_foreach_slot (vecs->pathvec, pp, i) {
1295                 pp->checkint = conf->checkint;
1296         }
1297
1298         while (1) {
1299                 block_signal(SIGHUP, &old);
1300                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1301                 lock(vecs->lock);
1302                 pthread_testcancel();
1303                 condlog(4, "tick");
1304
1305                 if (vecs->pathvec) {
1306                         vector_foreach_slot (vecs->pathvec, pp, i) {
1307                                 check_path(vecs, pp);
1308                         }
1309                 }
1310                 if (vecs->mpvec) {
1311                         defered_failback_tick(vecs->mpvec);
1312                         retry_count_tick(vecs->mpvec);
1313                 }
1314                 if (count)
1315                         count--;
1316                 else {
1317                         condlog(4, "map garbage collection");
1318                         mpvec_garbage_collector(vecs);
1319                         count = MAPGCINT;
1320                 }
1321
1322                 lock_cleanup_pop(vecs->lock);
1323                 pthread_sigmask(SIG_SETMASK, &old, NULL);
1324                 sleep(1);
1325         }
1326         return NULL;
1327 }
1328
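/*
 * Full (re)configuration: discover paths and maps, coalesce them into a
 * new map set, sync that set with the kernel and, if requested, start
 * the dm event waiter threads for the resulting maps.
 */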
1329 int
1330 configure (struct vectors * vecs, int start_waiters)
1331 {
1332         struct multipath * mpp;
1333         struct path * pp;
1334         vector mpvec;
1335         int i;
1336
1337         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1338                 return 1;
1339
1340         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1341                 return 1;
1342
1343         if (!(mpvec = vector_alloc()))
1344                 return 1;
1345
1346         /*
1347          * probe for current path (from sysfs) and map (from dm) sets
1348          */
1349         path_discovery(vecs->pathvec, conf, DI_ALL);
1350
1351         vector_foreach_slot (vecs->pathvec, pp, i){
1352                 if (filter_path(conf, pp) > 0){
1353                         vector_del_slot(vecs->pathvec, i);
1354                         free_path(pp);
1355                         i--;
1356                 }
1357                 else
1358                         pp->checkint = conf->checkint;
1359         }
1360         if (map_discovery(vecs))
1361                 return 1;
1362
1363         /*
1364          * create new set of maps & push changed ones into dm
1365          */
1366         if (coalesce_paths(vecs, mpvec, NULL, 1))
1367                 return 1;
1368
1369         /*
1370          * may need to remove some maps which are no longer relevant
1371          * e.g., due to blacklist changes in conf file
1372          */
1373         if (coalesce_maps(vecs, mpvec))
1374                 return 1;
1375
1376         dm_lib_release();
1377
1378         sync_maps_state(mpvec);
1379         vector_foreach_slot(mpvec, mpp, i){
1380                 remember_wwid(mpp->wwid);
1381                 update_map_pr(mpp);
1382         }
1383
1384         /*
1385          * purge dm of old maps
1386          */
1387         remove_maps(vecs);
1388
1389         /*
1390          * save new set of maps formed by considering current path state
1391          */
1392         vector_free(vecs->mpvec);
1393         vecs->mpvec = mpvec;
1394
1395         /*
1396          * start dm event waiter threads for these new maps
1397          */
1398         vector_foreach_slot(vecs->mpvec, mpp, i) {
1399                 if (setup_multipath(vecs, mpp))
1400                         return 1;
1401                 if (start_waiters)
1402                         if (start_waiter_thread(mpp, vecs))
1403                                 return 1;
1404         }
1405         return 0;
1406 }
1407
1408 int
1409 reconfigure (struct vectors * vecs)
1410 {
1411         struct config * old = conf;
1412         int retval = 1;
1413
1414         /*
1415          * free old map and path vectors ... they use old conf state
1416          */
1417         if (VECTOR_SIZE(vecs->mpvec))
1418                 remove_maps_and_stop_waiters(vecs);
1419
1420         if (VECTOR_SIZE(vecs->pathvec))
1421                 free_pathvec(vecs->pathvec, FREE_PATHS);
1422
1423         vecs->pathvec = NULL;
1424         conf = NULL;
1425
1426         if (!load_config(DEFAULT_CONFIGFILE)) {
1427                 conf->verbosity = old->verbosity;
1428                 conf->daemon = 1;
1429                 configure(vecs, 1);
1430                 free_config(old);
1431                 retval = 0;
1432         }
1433
1434         return retval;
1435 }
1436
1437 static struct vectors *
1438 init_vecs (void)
1439 {
1440         struct vectors * vecs;
1441
1442         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1443
1444         if (!vecs)
1445                 return NULL;
1446
1447         vecs->lock.mutex =
1448                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1449
1450         if (!vecs->lock.mutex)
1451                 goto out;
1452
1453         pthread_mutex_init(vecs->lock.mutex, NULL);
1454         vecs->lock.depth = 0;
1455
1456         return vecs;
1457
1458 out:
1459         FREE(vecs);
1460         condlog(0, "failed to init paths");
1461         return NULL;
1462 }
1463
1464 static void *
1465 signal_set(int signo, void (*func) (int))
1466 {
1467         int r;
1468         struct sigaction sig;
1469         struct sigaction osig;
1470
1471         sig.sa_handler = func;
1472         sigemptyset(&sig.sa_mask);
1473         sig.sa_flags = 0;
1474
1475         r = sigaction(signo, &sig, &osig);
1476
1477         if (r < 0)
1478                 return (SIG_ERR);
1479         else
1480                 return (osig.sa_handler);
1481 }
1482
1483 static void
1484 sighup (int sig)
1485 {
1486         condlog(2, "reconfigure (SIGHUP)");
1487
1488         if (running_state != DAEMON_RUNNING)
1489                 return;
1490
1491         reconfigure(gvecs);
1492
1493 #ifdef _DEBUG_
1494         dbg_free_final(NULL);
1495 #endif
1496 }
1497
1498 static void
1499 sigend (int sig)
1500 {
1501         exit_daemon(0);
1502 }
1503
1504 static void
1505 sigusr1 (int sig)
1506 {
1507         condlog(3, "SIGUSR1 received");
1508 }
1509
1510 static void
1511 signal_init(void)
1512 {
1513         signal_set(SIGHUP, sighup);
1514         signal_set(SIGUSR1, sigusr1);
1515         signal_set(SIGINT, sigend);
1516         signal_set(SIGTERM, sigend);
1517         signal(SIGPIPE, SIG_IGN);
1518 }
1519
1520 static void
1521 setscheduler (void)
1522 {
1523         int res;
1524         static struct sched_param sched_param = {
1525                 .sched_priority = 99
1526         };
1527
1528         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1529
1530         if (res == -1)
1531                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1532         return;
1533 }
1534
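/*
 * Shield the daemon from the OOM killer, preferring the newer
 * oom_score_adj interface and falling back to the legacy oom_adj file.
 */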
1535 static void
1536 set_oom_adj (void)
1537 {
1538 #ifdef OOM_SCORE_ADJ_MIN
1539         int retry = 1;
1540         char *file = "/proc/self/oom_score_adj";
1541         int score = OOM_SCORE_ADJ_MIN;
1542 #else
1543         int retry = 0;
1544         char *file = "/proc/self/oom_adj";
1545         int score = OOM_ADJUST_MIN;
1546 #endif
1547         FILE *fp;
1548         struct stat st;
1549
1550         do {
1551                 if (stat(file, &st) == 0){
1552                         fp = fopen(file, "w");
1553                         if (!fp) {
1554                                 condlog(0, "couldn't fopen %s : %s", file,
1555                                         strerror(errno));
1556                                 return;
1557                         }
1558                         fprintf(fp, "%i", score);
1559                         fclose(fp);
1560                         return;
1561                 }
1562                 if (errno != ENOENT) {
1563                         condlog(0, "couldn't stat %s : %s", file,
1564                                 strerror(errno));
1565                         return;
1566                 }
1567 #ifdef OOM_ADJUST_MIN
1568                 file = "/proc/self/oom_adj";
1569                 score = OOM_ADJUST_MIN;
1570 #else
1571                 retry = 0;
1572 #endif
1573         } while (retry--);
1574         condlog(0, "couldn't adjust oom score");
1575 }
1576
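/*
 * Daemon main body: load the configuration, initialize checkers and
 * prioritizers, start the uevent, CLI, checker and dispatcher threads,
 * then wait on exit_cond and tear everything down on shutdown.
 */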
1577 static int
1578 child (void * param)
1579 {
1580         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1581         pthread_attr_t log_attr, misc_attr;
1582         struct vectors * vecs;
1583         struct multipath * mpp;
1584         int i;
1585         sigset_t set;
1586         int rc, pid_rc;
1587
1588         mlockall(MCL_CURRENT | MCL_FUTURE);
1589
1590         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1591         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1592
1593         if (logsink) {
1594                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1595                 log_thread_start(&log_attr);
1596                 pthread_attr_destroy(&log_attr);
1597         }
1598
1599         running_state = DAEMON_START;
1600
1601         condlog(2, "--------start up--------");
1602         condlog(2, "read " DEFAULT_CONFIGFILE);
1603
1604         if (load_config(DEFAULT_CONFIGFILE))
1605                 exit(1);
1606
1607         if (init_checkers()) {
1608                 condlog(0, "failed to initialize checkers");
1609                 exit(1);
1610         }
1611         if (init_prio()) {
1612                 condlog(0, "failed to initialize prioritizers");
1613                 exit(1);
1614         }
1615
1616         setlogmask(LOG_UPTO(conf->verbosity + 3));
1617
1618         if (conf->max_fds) {
1619                 struct rlimit fd_limit;
1620
1621                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1622                         condlog(0, "can't get open fds limit: %s",
1623                                 strerror(errno));
1624                         fd_limit.rlim_cur = 0;
1625                         fd_limit.rlim_max = 0;
1626                 }
1627                 if (fd_limit.rlim_cur < conf->max_fds) {
1628                         fd_limit.rlim_cur = conf->max_fds;
1629                         if (fd_limit.rlim_max < conf->max_fds)
1630                                 fd_limit.rlim_max = conf->max_fds;
1631                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1632                                 condlog(0, "can't set open fds limit to "
1633                                         "%lu/%lu : %s",
1634                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1635                                         strerror(errno));
1636                         } else {
1637                                 condlog(3, "set open fds limit to %lu/%lu",
1638                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1639                         }
1640                 }
1641
1642         }
1643
1644         vecs = gvecs = init_vecs();
1645         if (!vecs)
1646                 exit(1);
1647
1648         signal_init();
1649         setscheduler();
1650         set_oom_adj();
1651
1652         conf->daemon = 1;
1653         udev_set_sync_support(0);
1654         /*
1655          * Start uevent listener early to catch events
1656          */
1657         if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs))) {
1658                 condlog(0, "failed to create uevent thread: %d", rc);
1659                 exit(1);
1660         }
1661         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1662                 condlog(0, "failed to create cli listener: %d", rc);
1663                 exit(1);
1664         }
1665         /*
1666          * fetch and configure both paths and multipaths
1667          */
1668         running_state = DAEMON_CONFIGURE;
1669
1670         lock(vecs->lock);
1671         if (configure(vecs, 1)) {
1672                 unlock(vecs->lock);
1673                 condlog(0, "failure during configuration");
1674                 exit(1);
1675         }
1676         unlock(vecs->lock);
1677
1678         /*
1679          * start threads
1680          */
1681         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1682                 condlog(0,"failed to create checker loop thread: %d", rc);
1683                 exit(1);
1684         }
1685         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1686                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1687                 exit(1);
1688         }
1689         pthread_attr_destroy(&misc_attr);
1690
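        /*
         * Startup is essentially complete at this point: the main thread
         * parks on exit_cond and is only woken when a shutdown is requested
         * elsewhere in the daemon (signal or CLI handling, not shown here),
         * after which it runs the exit path below.
         */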
1691         pthread_mutex_lock(&exit_mutex);
1692         /* Startup complete, create pidfile */
1693         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1694         /* Ignore errors, we can live without */
1695
1696         running_state = DAEMON_RUNNING;
1697         pthread_cond_wait(&exit_cond, &exit_mutex);
1698         /* Need to block these to avoid deadlocking */
1699         sigemptyset(&set);
1700         sigaddset(&set, SIGTERM);
1701         sigaddset(&set, SIGINT);
1702         pthread_sigmask(SIG_BLOCK, &set, NULL);
1703
1704         /*
1705          * exit path
1706          */
1707         running_state = DAEMON_SHUTDOWN;
1708         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1709         block_signal(SIGHUP, NULL);
1710         lock(vecs->lock);
1711         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1712                 vector_foreach_slot(vecs->mpvec, mpp, i)
1713                         dm_queue_if_no_path(mpp->alias, 0);
1714         remove_maps_and_stop_waiters(vecs);
1715         unlock(vecs->lock);
1716
1717         pthread_cancel(check_thr);
1718         pthread_cancel(uevent_thr);
1719         pthread_cancel(uxlsnr_thr);
1720         pthread_cancel(uevq_thr);
1721
1722         lock(vecs->lock);
1723         free_pathvec(vecs->pathvec, FREE_PATHS);
1724         vecs->pathvec = NULL;
1725         unlock(vecs->lock);
1726         /* Now all the waitevent threads will start rushing in. */
1727         while (vecs->lock.depth > 0) {
1728                 sleep (1); /* This is weak. */
1729                 condlog(3, "Have %d wait event checker threads to de-alloc,"
1730                         " waiting...", vecs->lock.depth);
1731         }
1732         pthread_mutex_destroy(vecs->lock.mutex);
1733         FREE(vecs->lock.mutex);
1734         vecs->lock.depth = 0;
1735         vecs->lock.mutex = NULL;
1736         FREE(vecs);
1737         vecs = NULL;
1738
1739         cleanup_checkers();
1740         cleanup_prio();
1741
1742         dm_lib_release();
1743         dm_lib_exit();
1744
1745         /* We're done here */
1746         if (!pid_rc) {
1747                 condlog(3, "unlink pidfile");
1748                 unlink(DEFAULT_PIDFILE);
1749         }
1750
1751         condlog(2, "--------shut down-------");
1752
1753         if (logsink)
1754                 log_thread_stop();
1755
1756         /*
1757          * Freeing config must be done after condlog() and dm_lib_exit(),
1758          * because logging functions like dlog() and dm_write_log()
1759          * reference the config.
1760          */
1761         free_config(conf);
1762         conf = NULL;
1763
1764 #ifdef _DEBUG_
1765         dbg_free_final(NULL);
1766 #endif
1767
1768         exit(0);
1769 }
1770
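/*
 * Classic double-fork daemonization: the first child calls setsid() to drop
 * the controlling terminal, the second fork ensures one can never be
 * reacquired, and stdin/stdout/stderr are redirected to /dev/null.  Returns 0
 * in the surviving daemon process, a positive pid in the original parent
 * (which then exits in main()), or -1 if the first fork fails.
 */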
1771 static int
1772 daemonize(void)
1773 {
1774         int pid;
1775         int dev_null_fd;
1776
1777         if( (pid = fork()) < 0){
1778                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1779                 return -1;
1780         }
1781         else if (pid != 0)
1782                 return pid;
1783
1784         setsid();
1785
1786         if ( (pid = fork()) < 0)
1787                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1788         else if (pid != 0)
1789                 _exit(0);
1790
1791         if (chdir("/") < 0)
1792                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1793
1794         dev_null_fd = open("/dev/null", O_RDWR);
1795         if (dev_null_fd < 0){
1796                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1797                         strerror(errno));
1798                 _exit(0);
1799         }
1800
1801         close(STDIN_FILENO);
1802         if (dup(dev_null_fd) < 0) {
1803                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1804                         strerror(errno));
1805                 _exit(0);
1806         }
1807         close(STDOUT_FILENO);
1808         if (dup(dev_null_fd) < 0) {
1809                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1810                         strerror(errno));
1811                 _exit(0);
1812         }
1813         close(STDERR_FILENO);
1814         if (dup(dev_null_fd) < 0) {
1815                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1816                         strerror(errno));
1817                 _exit(0);
1818         }
1819         close(dev_null_fd);
1820         daemon_pid = getpid();
1821         return 0;
1822 }
1823
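/*
 * Command line options:
 *   -d          stay in the foreground (messages go to the terminal instead
 *               of syslog)
 *   -v <level>  set the verbosity
 *   -k          run as an interactive CLI client (uxclnt) instead of a daemon
 * Any remaining arguments are forwarded as a single command to an already
 * running daemon, also through uxclnt().
 */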
1824 int
1825 main (int argc, char *argv[])
1826 {
1827         extern char *optarg;
1828         extern int optind;
1829         int arg;
1830         int err;
1831
1832         logsink = 1;
1833         running_state = DAEMON_INIT;
1834         dm_init();
1835
1836         if (getuid() != 0) {
1837                 fprintf(stderr, "need to be root\n");
1838                 exit(1);
1839         }
1840
1841         /* make sure we don't lock any path */
1842         if (chdir("/") < 0)
1843                 fprintf(stderr, "can't chdir to root directory : %s\n",
1844                         strerror(errno));
1845         umask(umask(077) | 022);
1846
1847         conf = alloc_config();
1848
1849         if (!conf)
1850                 exit(1);
1851
1852         while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1853         switch(arg) {
1854                 case 'd':
1855                         logsink = 0;
1856                         //debug=1; /* ### comment me out ### */
1857                         break;
1858                 case 'v':
1859                         /* reject arguments that do not start with a digit */
1860                         if (!isdigit(optarg[0]))
1861                                 exit(1);
1862
1863                         conf->verbosity = atoi(optarg);
1864                         break;
1865                 case 'k':
1866                         uxclnt(optarg);
1867                         exit(0);
1868                 default:
1869                         ;
1870                 }
1871         }
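        /*
         * Non-option arguments form a one-shot CLI command: they are joined
         * into one space-separated, newline-terminated line (arguments that
         * contain spaces are re-quoted) and sent to the running daemon, for
         * example `multipathd show paths`.
         */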
1872         if (optind < argc) {
1873                 char cmd[CMDSIZE];
1874                 char * s = cmd;
1875                 char * c = s;
1876
1877                 while (optind < argc) {
1878                         if (strchr(argv[optind], ' '))
1879                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1880                         else
1881                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1882                         optind++;
1883                 }
1884                 c += snprintf(c, s + CMDSIZE - c, "\n");
1885                 uxclnt(s);
1886                 exit(0);
1887         }
1888
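        /*
         * In foreground mode (-d) daemonize() is skipped; otherwise the
         * original parent and the intermediate child exit here and only the
         * daemonized process goes on to run child().
         */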
1889         if (!logsink)
1890                 err = 0;
1891         else
1892                 err = daemonize();
1893
1894         if (err < 0)
1895                 /* error */
1896                 exit(1);
1897         else if (err > 0)
1898                 /* parent dies */
1899                 exit(0);
1900         else
1901                 /* child lives */
1902                 return (child(NULL));
1903 }
1904
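/*
 * Persistent-reservation event handler: issue a PR IN (READ KEYS) on the
 * newly usable path, look for this map's reservation_key among the registered
 * keys and, if it is present, register the same key through this path with
 * PR OUT REGISTER AND IGNORE EXISTING KEY so the path participates in the
 * map's reservation.
 */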
1905 void *  mpath_pr_event_handler_fn (void * pathp )
1906 {
1907         struct multipath * mpp;
1908         int i,j, ret, isFound;
1909         struct path * pp = (struct path *)pathp;
1910         unsigned char *keyp;
1911         uint64_t prkey;
1912         struct prout_param_descriptor *param;
1913         struct prin_resp *resp;
1914
1915         mpp = pp->mpp;
1916
1917         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1918         if (!resp){
1919                 condlog(0, "%s: alloc failed for prin response", pp->dev);
1920                 return NULL;
1921         }
1922
1923         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1924         if (ret != MPATH_PR_SUCCESS )
1925         {
1926                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1927                 goto out;
1928         }
1929
1930         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
1931                 resp->prin_descriptor.prin_readkeys.additional_length);
1932
1933         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1934         {
1935                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1936                 ret = MPATH_PR_SUCCESS;
1937                 goto out;
1938         }
1939         prkey = 0;
1940         keyp = (unsigned char *)mpp->reservation_key;
1941         for (j = 0; j < 8; ++j) {
1942                 if (j > 0)
1943                         prkey <<= 8;
1944                 prkey |= *keyp;
1945                 ++keyp;
1946         }
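        /*
         * mpp->reservation_key holds the key as 8 big-endian bytes; the loop
         * above folds it into a uint64_t for logging and for rebuilding the
         * PROUT parameter payload further down.
         */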
1947         condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);
1948
1949         isFound = 0;
1950         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1951         {
1952                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
1953                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1954                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1955                 {
1956                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1957                         isFound = 1;
1958                         break;
1959                 }
1960         }
1961         if (!isFound)
1962         {
1963                 condlog(0, "%s: device not registered, or host is not "
1964                         "authorised for registration - skipping path", pp->dev);
1965                 ret = MPATH_PR_OTHER;
1966                 goto out;
1967         }
1968
1969         param = malloc(sizeof(struct prout_param_descriptor));
1970         memset(param, 0, sizeof(struct prout_param_descriptor));
1971
1972         for (j = 7; j >= 0; --j) {
1973                 param->sa_key[j] = (prkey & 0xff);
1974                 prkey >>= 8;
1975         }
1976         param->num_transportid = 0;
1977
1978         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1979
1980         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1981         if (ret != MPATH_PR_SUCCESS )
1982         {
1983                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
1984         }
1985         mpp->prflag = 1;
1986
1987         free(param);
1988 out:
1989         free(resp);
1990         return NULL;
1991 }
1992
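/*
 * Run mpath_pr_event_handler_fn() in a separate thread and immediately join
 * it, so the call is effectively synchronous; returns -1 without doing
 * anything when the map has no reservation_key configured.
 */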
1993 int mpath_pr_event_handle(struct path *pp)
1994 {
1995         pthread_t thread;
1996         int rc;
1997         pthread_attr_t attr;
1998         struct multipath * mpp;
1999
2000         mpp = pp->mpp;
2001
2002         if (!mpp->reservation_key)
2003                 return -1;
2004
2005         pthread_attr_init(&attr);
2006         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2007
2008         rc = pthread_create(&thread, NULL , mpath_pr_event_handler_fn, pp);
2009         if (rc) {
2010                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2011                 return -1;
2012         }
2013         pthread_attr_destroy(&attr);
2014         rc = pthread_join(thread, NULL);
2015         return 0;
2016 }
2017