multipath-tools: multipathd/main.c (commit 8681aa23fb45e03af6d463a46cb32277701836a3)
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <semaphore.h>
21 #include <mpath_persist.h>
22
23 /*
24  * libcheckers
25  */
26 #include <checkers.h>
27
28 /*
29  * libmultipath
30  */
31 #include <parser.h>
32 #include <vector.h>
33 #include <memory.h>
34 #include <config.h>
35 #include <util.h>
36 #include <hwtable.h>
37 #include <defaults.h>
38 #include <structs.h>
39 #include <blacklist.h>
40 #include <structs_vec.h>
41 #include <dmparser.h>
42 #include <devmapper.h>
43 #include <sysfs.h>
44 #include <dict.h>
45 #include <discovery.h>
46 #include <debug.h>
47 #include <propsel.h>
48 #include <uevent.h>
49 #include <switchgroup.h>
50 #include <print.h>
51 #include <configure.h>
52 #include <prio.h>
53 #include <pgpolicies.h>
55 #include <log.h>
56
57 #include "main.h"
58 #include "pidfile.h"
59 #include "uxlsnr.h"
60 #include "uxclnt.h"
61 #include "cli.h"
62 #include "cli_handlers.h"
63 #include "lock.h"
64 #include "waiter.h"
65 #include "wwids.h"
66
67 #define FILE_NAME_SIZE 256
68 #define CMDSIZE 160
69
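/*
 * Log the checker message "b" for the path "pp" that must be in scope at
 * the call site, at verbosity level "a".  Offline paths get a fixed
 * "path offline" message instead; empty messages are dropped.
 */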
70 #define LOG_MSG(a, b) \
71 do { \
72         if (pp->offline) \
73                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
74         else if (strlen(b)) \
75                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
76 } while(0)
77
78 struct mpath_event_param
79 {
80         char * devname;
81         struct multipath *mpp;
82 };
83
84 unsigned int mpath_mx_alloc_len;
85
86 int logsink;
87 enum daemon_status running_state;
88 pid_t daemon_pid;
89
90 static sem_t exit_sem;
91 /*
92  * global copy of vecs for use in sig handlers
93  */
94 struct vectors * gvecs;
95
96 struct udev * udev;
97
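/*
 * Return 1 if the map should switch path groups, i.e. if the best path
 * group (optionally recomputed after refreshing path priorities) differs
 * from the currently selected one.  Maps configured for manual failback
 * never switch automatically.
 */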
98 static int
99 need_switch_pathgroup (struct multipath * mpp, int refresh)
100 {
101         struct pathgroup * pgp;
102         struct path * pp;
103         unsigned int i, j;
104
105         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
106                 return 0;
107
108         /*
109          * Refresh path priority values
110          */
111         if (refresh)
112                 vector_foreach_slot (mpp->pg, pgp, i)
113                         vector_foreach_slot (pgp->paths, pp, j)
114                                 pathinfo(pp, conf->hwtable, DI_PRIO);
115
116         mpp->bestpg = select_path_group(mpp);
117
118         if (mpp->bestpg != mpp->nextpg)
119                 return 1;
120
121         return 0;
122 }
123
124 static void
125 switch_pathgroup (struct multipath * mpp)
126 {
127         mpp->stat_switchgroup++;
128         dm_switchgroup(mpp->alias, mpp->bestpg);
129         condlog(2, "%s: switch to path group #%i",
130                  mpp->alias, mpp->bestpg);
131 }
132
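/*
 * Remove maps from the previous configuration that do not appear in the
 * new map vector.  Maps that cannot be flushed (typically because the
 * device is still open) are carried over into the new vector instead,
 * and maps that are kept have their device-mapper devices reassigned
 * when reassign_maps is enabled.
 */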
133 static int
134 coalesce_maps(struct vectors *vecs, vector nmpv)
135 {
136         struct multipath * ompp;
137         vector ompv = vecs->mpvec;
138         unsigned int i;
139         int j;
140
141         vector_foreach_slot (ompv, ompp, i) {
142                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
143                         /*
144                          * remove all current maps not allowed by the
145                          * current configuration
146                          */
147                         if (dm_flush_map(ompp->alias)) {
148                                 condlog(0, "%s: unable to flush devmap",
149                                         ompp->alias);
150                                 /*
151                                  * may be just because the device is open
152                                  */
153                                 if (!vector_alloc_slot(nmpv))
154                                         return 1;
155
156                                 vector_set_slot(nmpv, ompp);
157                                 setup_multipath(vecs, ompp);
158
159                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
160                                         vector_del_slot(ompv, j);
161
162                                 continue;
163                         }
164                         else {
165                                 dm_lib_release();
166                                 condlog(2, "%s devmap removed", ompp->alias);
167                         }
168                 } else if (conf->reassign_maps) {
169                         condlog(3, "%s: Reassign existing device-mapper"
170                                 " devices", ompp->alias);
171                         dm_reassign(ompp->alias);
172                 }
173         }
174         return 0;
175 }
176
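/*
 * Reconcile the device-mapper state of every path in the map with the
 * checker state: reinstate paths the kernel marked failed although the
 * checker sees them up or ghost, and fail paths the kernel still has
 * active although the checker sees them down or shaky.
 */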
177 void
178 sync_map_state(struct multipath *mpp)
179 {
180         struct pathgroup *pgp;
181         struct path *pp;
182         unsigned int i, j;
183
184         if (!mpp->pg)
185                 return;
186
187         vector_foreach_slot (mpp->pg, pgp, i){
188                 vector_foreach_slot (pgp->paths, pp, j){
189                         if (pp->state == PATH_UNCHECKED || 
190                             pp->state == PATH_WILD)
191                                 continue;
192                         if ((pp->dmstate == PSTATE_FAILED ||
193                              pp->dmstate == PSTATE_UNDEF) &&
194                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
195                                 dm_reinstate_path(mpp->alias, pp->dev_t);
196                         else if ((pp->dmstate == PSTATE_ACTIVE ||
197                                   pp->dmstate == PSTATE_UNDEF) &&
198                                  (pp->state == PATH_DOWN ||
199                                   pp->state == PATH_SHAKY))
200                                 dm_fail_path(mpp->alias, pp->dev_t);
201                 }
202         }
203 }
204
205 static void
206 sync_maps_state(vector mpvec)
207 {
208         unsigned int i;
209         struct multipath *mpp;
210
211         vector_foreach_slot (mpvec, mpp, i)
212                 sync_map_state(mpp);
213 }
214
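/*
 * Tear a map down: flush it from device-mapper, then orphan its paths and
 * stop its event waiter thread.  Returns 1 if the flush failed, e.g.
 * because the device is still open.
 */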
215 static int
216 flush_map(struct multipath * mpp, struct vectors * vecs)
217 {
218         /*
219          * clear references to this map before flushing so we can ignore
220          * the spurious uevent we may generate with the dm_flush_map call below
221          */
222         if (dm_flush_map(mpp->alias)) {
223                 /*
224                  * May not really be an error -- if the map was already flushed
225                  * from the device mapper by dmsetup(8) for instance.
226                  */
227                 condlog(0, "%s: can't flush", mpp->alias);
228                 return 1;
229         }
230         else {
231                 dm_lib_release();
232                 condlog(2, "%s: devmap removed", mpp->alias);
233         }
234
235         orphan_paths(vecs->pathvec, mpp);
236         remove_map_and_stop_waiter(mpp, vecs, 1);
237
238         return 0;
239 }
240
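/*
 * Handle a map uevent on a dm-* device (only "change" events reach this
 * point): resolve the map name from DM_NAME, or from major:minor if the
 * uevent lacks it, then hand off to ev_add_map().
 */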
241 static int
242 uev_add_map (struct uevent * uev, struct vectors * vecs)
243 {
244         char *alias;
245         int major = -1, minor = -1, rc;
246
247         condlog(3, "%s: add map (uevent)", uev->kernel);
248         alias = uevent_get_dm_name(uev);
249         if (!alias) {
250                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
251                 major = uevent_get_major(uev);
252                 minor = uevent_get_minor(uev);
253                 alias = dm_mapname(major, minor);
254                 if (!alias) {
255                         condlog(2, "%s: mapname not found for %d:%d",
256                                 uev->kernel, major, minor);
257                         return 1;
258                 }
259         }
260         rc = ev_add_map(uev->kernel, alias, vecs);
261         FREE(alias);
262         return rc;
263 }
264
265 int
266 ev_add_map (char * dev, char * alias, struct vectors * vecs)
267 {
268         char * refwwid;
269         struct multipath * mpp;
270         int map_present;
271         int r = 1;
272
273         map_present = dm_map_present(alias);
274
275         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
276                 condlog(4, "%s: not a multipath map", alias);
277                 return 0;
278         }
279
280         mpp = find_mp_by_alias(vecs->mpvec, alias);
281
282         if (mpp) {
283                 /*
284                  * Not really an error -- we generate our own uevent
285                  * if we create a multipath mapped device as a result
286                  * of uev_add_path
287                  */
288                 if (conf->reassign_maps) {
289                         condlog(3, "%s: Reassign existing device-mapper devices",
290                                 alias);
291                         dm_reassign(alias);
292                 }
293                 return 0;
294         }
295         condlog(2, "%s: adding map", alias);
296
297         /*
298          * now we can register the map
299          */
300         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
301                 sync_map_state(mpp);
302                 condlog(2, "%s: devmap %s registered", alias, dev);
303                 return 0;
304         }
305         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
306
307         if (refwwid) {
308                 r = coalesce_paths(vecs, NULL, refwwid, 0);
309                 dm_lib_release();
310         }
311
312         if (!r)
313                 condlog(2, "%s: devmap %s added", alias, dev);
314         else if (r == 2)
315                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
316         else
317                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
318
319         FREE(refwwid);
320         return r;
321 }
322
323 static int
324 uev_remove_map (struct uevent * uev, struct vectors * vecs)
325 {
326         char *alias;
327         int minor;
328         struct multipath *mpp;
329
330         condlog(2, "%s: remove map (uevent)", uev->kernel);
331         alias = uevent_get_dm_name(uev);
332         if (!alias) {
333                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
334                 return 0;
335         }
336         minor = uevent_get_minor(uev);
337         mpp = find_mp_by_minor(vecs->mpvec, minor);
338
339         if (!mpp) {
340                 condlog(2, "%s: devmap not registered, can't remove",
341                         uev->kernel);
342                 goto out;
343         }
344         if (strcmp(mpp->alias, alias)) {
345                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
346                         mpp->alias, mpp->dmi->minor, minor);
347                 goto out;
348         }
349
350         orphan_paths(vecs->pathvec, mpp);
351         remove_map_and_stop_waiter(mpp, vecs, 1);
352 out:
353         FREE(alias);
354         return 0;
355 }
356
357 int
358 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
359 {
360         struct multipath * mpp;
361
362         mpp = find_mp_by_minor(vecs->mpvec, minor);
363
364         if (!mpp) {
365                 condlog(2, "%s: devmap not registered, can't remove",
366                         devname);
367                 return 0;
368         }
369         if (strcmp(mpp->alias, alias)) {
370                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
371                         mpp->alias, mpp->dmi->minor, minor);
372                 return 0;
373         }
374         return flush_map(mpp, vecs);
375 }
376
377 static int
378 uev_add_path (struct uevent *uev, struct vectors * vecs)
379 {
380         struct path *pp;
381         int ret, i;
382
383         condlog(2, "%s: add path (uevent)", uev->kernel);
384         if (strstr(uev->kernel, "..") != NULL) {
385                 /*
386                  * Don't allow relative device names in the pathvec
387                  */
388                 condlog(0, "%s: path name is invalid", uev->kernel);
389                 return 1;
390         }
391
392         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
393         if (pp) {
394                 condlog(0, "%s: spurious uevent, path already in pathvec",
395                         uev->kernel);
396                 if (pp->mpp)
397                         return 0;
398                 if (!strlen(pp->wwid)) {
399                         udev_device_unref(pp->udev);
400                         pp->udev = udev_device_ref(uev->udev);
401                         ret = pathinfo(pp, conf->hwtable,
402                                        DI_ALL | DI_BLACKLIST);
403                         if (ret == 2) {
404                                 i = find_slot(vecs->pathvec, (void *)pp);
405                                 if (i != -1)
406                                         vector_del_slot(vecs->pathvec, i);
407                                 free_path(pp);
408                                 return 0;
409                         } else if (ret == 1) {
410                                 condlog(0, "%s: failed to reinitialize path",
411                                         uev->kernel);
412                                 return 1;
413                         }
414                 }
415         } else {
416                 /*
417                  * get path vital state
418                  */
419                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
420                                      uev->udev, DI_ALL, &pp);
421                 if (!pp) {
422                         if (ret == 2)
423                                 return 0;
424                         condlog(0, "%s: failed to store path info",
425                                 uev->kernel);
426                         return 1;
427                 }
428                 pp->checkint = conf->checkint;
429         }
430
431         return ev_add_path(pp, vecs);
432 }
433
434 /*
435  * returns:
436  * 0: added
437  * 1: error
438  */
439 int
440 ev_add_path (struct path * pp, struct vectors * vecs)
441 {
442         struct multipath * mpp;
443         char empty_buff[WWID_SIZE] = {0};
444         char params[PARAMS_SIZE] = {0};
445         int retries = 3;
446         int start_waiter = 0;
447
448         /*
449          * need path UID to go any further
450          */
451         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
452                 condlog(0, "%s: failed to get path uid", pp->dev);
453                 goto fail; /* leave path added to pathvec */
454         }
455         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
456 rescan:
457         if (mpp) {
458                 if ((!pp->size) || (mpp->size != pp->size)) {
459                         if (!pp->size)
460                                 condlog(0, "%s: failed to add new path %s, "
461                                         "device size is 0",
462                                         mpp->alias, pp->dev);
463                         else
464                                 condlog(0, "%s: failed to add new path %s, "
465                                         "device size mismatch",
466                                         mpp->alias, pp->dev);
467                         int i = find_slot(vecs->pathvec, (void *)pp);
468                         if (i != -1)
469                                 vector_del_slot(vecs->pathvec, i);
470                         free_path(pp);
471                         return 1;
472                 }
473
474                 condlog(4,"%s: adopting all paths for path %s",
475                         mpp->alias, pp->dev);
476                 if (adopt_paths(vecs->pathvec, mpp, 1))
477                         goto fail; /* leave path added to pathvec */
478
479                 verify_paths(mpp, vecs, NULL);
480                 mpp->flush_on_last_del = FLUSH_UNDEF;
481                 mpp->action = ACT_RELOAD;
482         }
483         else {
484                 if (!pp->size) {
485                         condlog(0, "%s: failed to create new map,"
486                                 " device size is 0", pp->dev);
487                         int i = find_slot(vecs->pathvec, (void *)pp);
488                         if (i != -1)
489                                 vector_del_slot(vecs->pathvec, i);
490                         free_path(pp);
491                         return 1;
492                 }
493
494                 condlog(4,"%s: creating new map", pp->dev);
495                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
496                         mpp->action = ACT_CREATE;
497                         /*
498                          * We don't depend on ACT_CREATE, as domap will
499                          * set it to ACT_NOTHING when complete.
500                          */
501                         start_waiter = 1;
502                 }
503                 else
504                         goto fail; /* leave path added to pathvec */
505         }
506
507         /* persistent reservation check */
508         mpath_pr_event_handle(pp);
509
510         /*
511          * push the map to the device-mapper
512          */
513         if (setup_map(mpp, params, PARAMS_SIZE)) {
514                 condlog(0, "%s: failed to setup map for addition of new "
515                         "path %s", mpp->alias, pp->dev);
516                 goto fail_map;
517         }
518         /*
519          * reload the map for the multipath mapped device
520          */
521         if (domap(mpp, params) <= 0) {
522                 condlog(0, "%s: failed in domap for addition of new "
523                         "path %s", mpp->alias, pp->dev);
524                 /*
525                  * deal with asynchronous uevents :((
526                  */
527                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
528                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
529                         sleep(1);
530                         update_mpp_paths(mpp, vecs->pathvec);
531                         goto rescan;
532                 }
533                 else if (mpp->action == ACT_RELOAD)
534                         condlog(0, "%s: giving up reload", mpp->alias);
535                 else
536                         goto fail_map;
537         }
538         dm_lib_release();
539
540         /*
541          * update our state from kernel regardless of create or reload
542          */
543         if (setup_multipath(vecs, mpp))
544                 goto fail; /* if setup_multipath fails, it removes the map */
545
546         sync_map_state(mpp);
547
548         if ((mpp->action == ACT_CREATE ||
549              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
550             start_waiter_thread(mpp, vecs))
551                 goto fail_map;
552
553         if (retries >= 0) {
554                 condlog(2, "%s [%s]: path added to devmap %s",
555                         pp->dev, pp->dev_t, mpp->alias);
556                 return 0;
557         }
558         else
559                 return 1;
560
561 fail_map:
562         remove_map(mpp, vecs, 1);
563 fail:
564         orphan_path(pp);
565         return 1;
566 }
567
568 static int
569 uev_remove_path (struct uevent *uev, struct vectors * vecs)
570 {
571         struct path *pp;
572
573         condlog(2, "%s: remove path (uevent)", uev->kernel);
574         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
575
576         if (!pp) {
577                 /* Not an error; path might have been purged earlier */
578                 condlog(0, "%s: path already removed", uev->kernel);
579                 return 0;
580         }
581
582         return ev_remove_path(pp, vecs);
583 }
584
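/*
 * Remove a path from its map.  If it was the last path, the whole map is
 * flushed (after disabling queueing when flush_on_last_del is set);
 * otherwise the map is reloaded without the path.
 */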
585 int
586 ev_remove_path (struct path *pp, struct vectors * vecs)
587 {
588         struct multipath * mpp;
589         int i, retval = 0;
590         char params[PARAMS_SIZE] = {0};
591
592         /*
593          * avoid referring to the map of an orphaned path
594          */
595         if ((mpp = pp->mpp)) {
596                 /*
597                  * transform the mp->pg vector of vectors of paths
598                  * into a mp->params string to feed the device-mapper
599                  */
600                 if (update_mpp_paths(mpp, vecs->pathvec)) {
601                         condlog(0, "%s: failed to update paths",
602                                 mpp->alias);
603                         goto fail;
604                 }
605                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
606                         vector_del_slot(mpp->paths, i);
607
608                 /*
609                  * remove the map IFF removing the last path
610                  */
611                 if (VECTOR_SIZE(mpp->paths) == 0) {
612                         char alias[WWID_SIZE];
613
614                         /*
615                          * flush_map will fail if the device is open
616                          */
617                         strncpy(alias, mpp->alias, WWID_SIZE);
618                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
619                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
620                                 mpp->retry_tick = 0;
621                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
622                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
623                                 dm_queue_if_no_path(mpp->alias, 0);
624                         }
625                         if (!flush_map(mpp, vecs)) {
626                                 condlog(2, "%s: removed map after"
627                                         " removing all paths",
628                                         alias);
629                                 retval = 0;
630                                 goto out;
631                         }
632                         /*
633                          * Not an error, continue
634                          */
635                 }
636
637                 if (setup_map(mpp, params, PARAMS_SIZE)) {
638                         condlog(0, "%s: failed to setup map for"
639                                 " removal of path %s", mpp->alias, pp->dev);
640                         goto fail;
641                 }
642                 /*
643                  * reload the map
644                  */
645                 mpp->action = ACT_RELOAD;
646                 if (domap(mpp, params) <= 0) {
647                         condlog(0, "%s: failed in domap for "
648                                 "removal of path %s",
649                                 mpp->alias, pp->dev);
650                         retval = 1;
651                 } else {
652                         /*
653                          * update our state from kernel
654                          */
655                         if (setup_multipath(vecs, mpp)) {
656                                 goto fail;
657                         }
658                         sync_map_state(mpp);
659
660                         condlog(2, "%s [%s]: path removed from map %s",
661                                 pp->dev, pp->dev_t, mpp->alias);
662                 }
663         }
664
665 out:
666         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
667                 vector_del_slot(vecs->pathvec, i);
668
669         free_path(pp);
670
671         return retval;
672
673 fail:
674         remove_map_and_stop_waiter(mpp, vecs, 1);
675         return 1;
676 }
677
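/*
 * Handle "change" uevents on path devices.  Currently this only reacts to
 * a change of the read-only attribute, by reloading the owning map.
 */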
678 static int
679 uev_update_path (struct uevent *uev, struct vectors * vecs)
680 {
681         int ro, retval = 0;
682
683         ro = uevent_get_disk_ro(uev);
684
685         if (ro >= 0) {
686                 struct path * pp;
687
688                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
689                         uev->kernel, ro);
690                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
691                 if (!pp) {
692                         condlog(0, "%s: spurious uevent, path not found",
693                                 uev->kernel);
694                         return 1;
695                 }
696                 if (pp->mpp) {
697                         retval = reload_map(vecs, pp->mpp, 0);
698
699                         condlog(2, "%s: map %s reloaded (retval %d)",
700                                 uev->kernel, pp->mpp->alias, retval);
701                 }
702
703         }
704
705         return retval;
706 }
707
708 static int
709 map_discovery (struct vectors * vecs)
710 {
711         struct multipath * mpp;
712         unsigned int i;
713
714         if (dm_get_maps(vecs->mpvec))
715                 return 1;
716
717         vector_foreach_slot (vecs->mpvec, mpp, i)
718                 if (setup_multipath(vecs, mpp))
719                         return 1;
720
721         return 0;
722 }
723
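/*
 * Callback for the CLI unix socket listener: parse and run the command
 * under the vectors lock, and synthesize a "fail" reply on error or an
 * "ok" reply when the handler produced no output of its own.
 */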
724 int
725 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
726 {
727         struct vectors * vecs;
728         int r;
729
730         *reply = NULL;
731         *len = 0;
732         vecs = (struct vectors *)trigger_data;
733
734         pthread_cleanup_push(cleanup_lock, &vecs->lock);
735         lock(vecs->lock);
736         pthread_testcancel();
737
738         r = parse_cmd(str, reply, len, vecs);
739
740         if (r > 0) {
741                 *reply = STRDUP("fail\n");
742                 *len = strlen(*reply) + 1;
743                 r = 1;
744         }
745         else if (!r && *len == 0) {
746                 *reply = STRDUP("ok\n");
747                 *len = strlen(*reply) + 1;
748                 r = 0;
749         }
750         /* else if (r < 0) leave *reply alone */
751
752         lock_cleanup_pop(vecs->lock);
753         return r;
754 }
755
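/*
 * Filter uevents by devpath: keep only whole block devices.  A devpath
 * like .../block/sda passes, while .../block/sda/sda1 (a partition) or a
 * devpath without a /block/ component is discarded.
 */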
756 static int
757 uev_discard(char * devpath)
758 {
759         char *tmp;
760         char a[11], b[11];
761
762         /*
763          * keep only block devices, discard partitions
764          */
765         tmp = strstr(devpath, "/block/");
766         if (tmp == NULL){
767                 condlog(4, "no /block/ in '%s'", devpath);
768                 return 1;
769         }
770         if (sscanf(tmp, "/block/%10s", a) != 1 ||
771             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
772                 condlog(4, "discard event on %s", devpath);
773                 return 1;
774         }
775         return 0;
776 }
777
778 int
779 uev_trigger (struct uevent * uev, void * trigger_data)
780 {
781         int r = 0;
782         struct vectors * vecs;
783
784         vecs = (struct vectors *)trigger_data;
785
786         if (uev_discard(uev->devpath))
787                 return 0;
788
789         pthread_cleanup_push(cleanup_lock, &vecs->lock);
790         lock(vecs->lock);
791         pthread_testcancel();
792
793         /*
794          * device map event
795          * Add events are ignored here as the tables
796          * are not fully initialised then.
797          */
798         if (!strncmp(uev->kernel, "dm-", 3)) {
799                 if (!strncmp(uev->action, "change", 6)) {
800                         r = uev_add_map(uev, vecs);
801                         goto out;
802                 }
803                 if (!strncmp(uev->action, "remove", 6)) {
804                         r = uev_remove_map(uev, vecs);
805                         goto out;
806                 }
807                 goto out;
808         }
809
810         /*
811          * path add/remove event
812          */
813         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
814                            uev->kernel) > 0)
815                 goto out;
816
817         if (!strncmp(uev->action, "add", 3)) {
818                 r = uev_add_path(uev, vecs);
819                 goto out;
820         }
821         if (!strncmp(uev->action, "remove", 6)) {
822                 r = uev_remove_path(uev, vecs);
823                 goto out;
824         }
825         if (!strncmp(uev->action, "change", 6)) {
826                 r = uev_update_path(uev, vecs);
827                 goto out;
828         }
829
830 out:
831         lock_cleanup_pop(vecs->lock);
832         return r;
833 }
834
835 static void *
836 ueventloop (void * ap)
837 {
838         if (uevent_listen(udev))
839                 condlog(0, "error starting uevent listener");
840
841         return NULL;
842 }
843
844 static void *
845 uevqloop (void * ap)
846 {
847         if (uevent_dispatch(&uev_trigger, ap))
848                 condlog(0, "error starting uevent dispatcher");
849
850         return NULL;
851 }
852 static void *
853 uxlsnrloop (void * ap)
854 {
855         if (cli_init())
856                 return NULL;
857
858         set_handler_callback(LIST+PATHS, cli_list_paths);
859         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
860         set_handler_callback(LIST+MAPS, cli_list_maps);
861         set_handler_callback(LIST+STATUS, cli_list_status);
862         set_handler_callback(LIST+DAEMON, cli_list_daemon);
863         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
864         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
865         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
866         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
867         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
868         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
869         set_handler_callback(LIST+CONFIG, cli_list_config);
870         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
871         set_handler_callback(LIST+DEVICES, cli_list_devices);
872         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
873         set_handler_callback(ADD+PATH, cli_add_path);
874         set_handler_callback(DEL+PATH, cli_del_path);
875         set_handler_callback(ADD+MAP, cli_add_map);
876         set_handler_callback(DEL+MAP, cli_del_map);
877         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
878         set_handler_callback(RECONFIGURE, cli_reconfigure);
879         set_handler_callback(SUSPEND+MAP, cli_suspend);
880         set_handler_callback(RESUME+MAP, cli_resume);
881         set_handler_callback(RESIZE+MAP, cli_resize);
882         set_handler_callback(RELOAD+MAP, cli_reload);
883         set_handler_callback(RESET+MAP, cli_reassign);
884         set_handler_callback(REINSTATE+PATH, cli_reinstate);
885         set_handler_callback(FAIL+PATH, cli_fail);
886         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
887         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
888         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
889         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
890         set_handler_callback(QUIT, cli_quit);
891         set_handler_callback(SHUTDOWN, cli_shutdown);
892         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
893         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
894         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
895         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
896         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
897
898         umask(077);
899         uxsock_listen(&uxsock_trigger, ap);
900
901         return NULL;
902 }
903
904 void
905 exit_daemon (void)
906 {
907         sem_post(&exit_sem);
908 }
909
910 const char *
911 daemon_status(void)
912 {
913         switch (running_state) {
914         case DAEMON_INIT:
915                 return "init";
916         case DAEMON_START:
917                 return "startup";
918         case DAEMON_CONFIGURE:
919                 return "configure";
920         case DAEMON_RUNNING:
921                 return "running";
922         case DAEMON_SHUTDOWN:
923                 return "shutdown";
924         }
925         return NULL;
926 }
927
928 static void
929 fail_path (struct path * pp, int del_active)
930 {
931         if (!pp->mpp)
932                 return;
933
934         condlog(2, "checker failed path %s in map %s",
935                  pp->dev_t, pp->mpp->alias);
936
937         dm_fail_path(pp->mpp->alias, pp->dev_t);
938         if (del_active)
939                 update_queue_mode_del_path(pp->mpp);
940 }
941
942 /*
943  * caller must have locked the path list before calling this function
944  */
945 static void
946 reinstate_path (struct path * pp, int add_active)
947 {
948         if (!pp->mpp)
949                 return;
950
951         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
952                 condlog(0, "%s: reinstate failed", pp->dev_t);
953         else {
954                 condlog(2, "%s: reinstated", pp->dev_t);
955                 if (add_active)
956                         update_queue_mode_add_path(pp->mpp);
957         }
958 }
959
960 static void
961 enable_group(struct path * pp)
962 {
963         struct pathgroup * pgp;
964
965         /*
966          * if a path is added through uev_add_path, pgindex can be unset.
967          * the next update_multipath_strings() will set it, upon map reload.
968          *
969          * we can safely return here, because upon map reload all
970          * PGs will be enabled.
971          */
972         if (!pp->mpp->pg || !pp->pgindex)
973                 return;
974
975         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
976
977         if (pgp->status == PGSTATE_DISABLED) {
978                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
979                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
980         }
981 }
982
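/*
 * Remove registered maps whose device-mapper device no longer exists.
 */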
983 static void
984 mpvec_garbage_collector (struct vectors * vecs)
985 {
986         struct multipath * mpp;
987         unsigned int i;
988
989         if (!vecs->mpvec)
990                 return;
991
992         vector_foreach_slot (vecs->mpvec, mpp, i) {
993                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
994                         condlog(2, "%s: remove dead map", mpp->alias);
995                         remove_map_and_stop_waiter(mpp, vecs, 1);
996                         i--;
997                 }
998         }
999 }
1000
1001 /* This is called after a path has started working again. If the multipath
1002  * device for this path uses the followover failback type, and this is the
1003  * best pathgroup, and this is the first path in the pathgroup to come back
1004  * up, then switch to this pathgroup */
1005 static int
1006 followover_should_failback(struct path * pp)
1007 {
1008         struct pathgroup * pgp;
1009         struct path *pp1;
1010         int i;
1011
1012         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1013             !pp->mpp->pg || !pp->pgindex ||
1014             pp->pgindex != pp->mpp->bestpg)
1015                 return 0;
1016
1017         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1018         vector_foreach_slot(pgp->paths, pp1, i) {
1019                 if (pp1 == pp)
1020                         continue;
1021                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1022                         return 0;
1023         }
1024         return 1;
1025 }
1026
1027 static void
1028 defered_failback_tick (vector mpvec)
1029 {
1030         struct multipath * mpp;
1031         unsigned int i;
1032
1033         vector_foreach_slot (mpvec, mpp, i) {
1034                 /*
1035                  * deferred failback getting closer
1036                  */
1037                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1038                         mpp->failback_tick--;
1039
1040                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1041                                 switch_pathgroup(mpp);
1042                 }
1043         }
1044 }
1045
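/*
 * Count down the no_path_retry timer of maps that are queueing without an
 * active path; when it expires, tell device-mapper to stop queueing I/O.
 */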
1046 static void
1047 retry_count_tick(vector mpvec)
1048 {
1049         struct multipath *mpp;
1050         unsigned int i;
1051
1052         vector_foreach_slot (mpvec, mpp, i) {
1053                 if (mpp->retry_tick) {
1054                         mpp->stat_total_queueing_time++;
1055                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1056                         if(--mpp->retry_tick == 0) {
1057                                 dm_queue_if_no_path(mpp->alias, 0);
1058                                 condlog(2, "%s: Disable queueing", mpp->alias);
1059                         }
1060                 }
1061         }
1062 }
1063
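/*
 * Refresh the priority of this path, or of every path in its map when
 * refresh_all is set.  Returns 1 if any priority changed.
 */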
1064 int update_prio(struct path *pp, int refresh_all)
1065 {
1066         int oldpriority;
1067         struct path *pp1;
1068         struct pathgroup * pgp;
1069         int i, j, changed = 0;
1070
1071         if (refresh_all) {
1072                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1073                         vector_foreach_slot (pgp->paths, pp1, j) {
1074                                 oldpriority = pp1->priority;
1075                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1076                                 if (pp1->priority != oldpriority)
1077                                         changed = 1;
1078                         }
1079                 }
1080                 return changed;
1081         }
1082         oldpriority = pp->priority;
1083         pathinfo(pp, conf->hwtable, DI_PRIO);
1084
1085         if (pp->priority == oldpriority)
1086                 return 0;
1087         return 1;
1088 }
1089
1090 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1091 {
1092         if (reload_map(vecs, mpp, refresh))
1093                 return 1;
1094
1095         dm_lib_release();
1096         if (setup_multipath(vecs, mpp) != 0)
1097                 return 1;
1098         sync_map_state(mpp);
1099
1100         return 0;
1101 }
1102
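/*
 * Check a single path: determine its new state, synchronize with the
 * kernel, fail or reinstate it in device-mapper on state transitions,
 * adjust the polling interval, refresh priorities and trigger a path
 * group switch or failback when needed.
 */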
1103 void
1104 check_path (struct vectors * vecs, struct path * pp)
1105 {
1106         int newstate;
1107         int new_path_up = 0;
1108         int chkr_new_path_up = 0;
1109         int oldchkrstate = pp->chkrstate;
1110
1111         if (!pp->mpp)
1112                 return;
1113
1114         if (pp->tick && --pp->tick)
1115                 return; /* don't check this path yet */
1116
1117         /*
1118          * schedule the next check as soon as possible,
1119          * in case we exit abnormally from here
1120          */
1121         pp->tick = conf->checkint;
1122
1123         newstate = path_offline(pp);
1124         if (newstate == PATH_UP)
1125                 newstate = get_state(pp, 1);
1126
1127         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1128                 condlog(2, "%s: unusable path", pp->dev);
1129                 pathinfo(pp, conf->hwtable, 0);
1130                 return;
1131         }
1132         /*
1133          * Async IO in flight. Keep the previous path state
1134          * and reschedule as soon as possible
1135          */
1136         if (newstate == PATH_PENDING) {
1137                 pp->tick = 1;
1138                 return;
1139         }
1140         /*
1141          * Synchronize with kernel state
1142          */
1143         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1144                 condlog(1, "%s: Could not synchronize with kernel state",
1145                         pp->dev);
1146                 pp->dmstate = PSTATE_UNDEF;
1147         }
1148         pp->chkrstate = newstate;
1149         if (newstate != pp->state) {
1150                 int oldstate = pp->state;
1151                 pp->state = newstate;
1152                 LOG_MSG(1, checker_message(&pp->checker));
1153
1154                 /*
1155                  * upon state change, reset the checkint
1156                  * to the shortest delay
1157                  */
1158                 pp->checkint = conf->checkint;
1159
1160                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1161                         /*
1162                          * proactively fail path in the DM
1163                          */
1164                         if (oldstate == PATH_UP ||
1165                             oldstate == PATH_GHOST)
1166                                 fail_path(pp, 1);
1167                         else
1168                                 fail_path(pp, 0);
1169
1170                         /*
1171                          * cancel scheduled failback
1172                          */
1173                         pp->mpp->failback_tick = 0;
1174
1175                         pp->mpp->stat_path_failures++;
1176                         return;
1177                 }
1178
1179                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1180                         if (pp->mpp && pp->mpp->prflag) {
1181                                 /*
1182                                  * Check Persistent Reservation.
1183                                  */
1184                                 condlog(2, "%s: checking persistent "
1185                                         "reservation registration", pp->dev);
1186                                 mpath_pr_event_handle(pp);
1187                         }
1188                 }
1189
1190                 /*
1191                  * reinstate this path
1192                  */
1193                 if (oldstate != PATH_UP &&
1194                     oldstate != PATH_GHOST)
1195                         reinstate_path(pp, 1);
1196                 else
1197                         reinstate_path(pp, 0);
1198
1199                 new_path_up = 1;
1200
1201                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1202                         chkr_new_path_up = 1;
1203
1204                 /*
1205                  * if at least one path is up in a group, and
1206                  * the group is disabled, re-enable it
1207                  */
1208                 if (newstate == PATH_UP)
1209                         enable_group(pp);
1210         }
1211         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1212                 if (pp->dmstate == PSTATE_FAILED ||
1213                     pp->dmstate == PSTATE_UNDEF) {
1214                         /* Clear IO errors */
1215                         reinstate_path(pp, 0);
1216                 } else {
1217                         LOG_MSG(4, checker_message(&pp->checker));
1218                         if (pp->checkint != conf->max_checkint) {
1219                                 /*
1220                                  * double the next check delay.
1221                                  * max at conf->max_checkint
1222                                  */
1223                                 if (pp->checkint < (conf->max_checkint / 2))
1224                                         pp->checkint = 2 * pp->checkint;
1225                                 else
1226                                         pp->checkint = conf->max_checkint;
1227
1228                                 condlog(4, "%s: delay next check %is",
1229                                         pp->dev_t, pp->checkint);
1230                         }
1231                         pp->tick = pp->checkint;
1232                 }
1233         }
1234         else if (newstate == PATH_DOWN) {
1235                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1236                         LOG_MSG(3, checker_message(&pp->checker));
1237                 else
1238                         LOG_MSG(2, checker_message(&pp->checker));
1239         }
1240
1241         pp->state = newstate;
1242
1243         /*
1244          * path prio refreshing
1245          */
1246         condlog(4, "path prio refresh");
1247
1248         if (update_prio(pp, new_path_up) &&
1249             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1250              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1251                 update_path_groups(pp->mpp, vecs, !new_path_up);
1252         else if (need_switch_pathgroup(pp->mpp, 0)) {
1253                 if (pp->mpp->pgfailback > 0 &&
1254                     (new_path_up || pp->mpp->failback_tick <= 0))
1255                         pp->mpp->failback_tick =
1256                                 pp->mpp->pgfailback + 1;
1257                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1258                          (chkr_new_path_up && followover_should_failback(pp)))
1259                         switch_pathgroup(pp->mpp);
1260         }
1261 }
1262
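/*
 * Path checker thread: once per second, check every path under the
 * vectors lock, run the deferred failback and retry timers, and
 * periodically garbage-collect dead maps.
 */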
1263 static void *
1264 checkerloop (void *ap)
1265 {
1266         struct vectors *vecs;
1267         struct path *pp;
1268         int count = 0;
1269         unsigned int i;
1270
1271         mlockall(MCL_CURRENT | MCL_FUTURE);
1272         vecs = (struct vectors *)ap;
1273         condlog(2, "path checkers start up");
1274
1275         /*
1276          * init the path check interval
1277          */
1278         vector_foreach_slot (vecs->pathvec, pp, i) {
1279                 pp->checkint = conf->checkint;
1280         }
1281
1282         while (1) {
1283                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1284                 lock(vecs->lock);
1285                 pthread_testcancel();
1286                 condlog(4, "tick");
1287
1288                 if (vecs->pathvec) {
1289                         vector_foreach_slot (vecs->pathvec, pp, i) {
1290                                 check_path(vecs, pp);
1291                         }
1292                 }
1293                 if (vecs->mpvec) {
1294                         defered_failback_tick(vecs->mpvec);
1295                         retry_count_tick(vecs->mpvec);
1296                 }
1297                 if (count)
1298                         count--;
1299                 else {
1300                         condlog(4, "map garbage collection");
1301                         mpvec_garbage_collector(vecs);
1302                         count = MAPGCINT;
1303                 }
1304
1305                 lock_cleanup_pop(vecs->lock);
1306                 sleep(1);
1307         }
1308         return NULL;
1309 }
1310
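/*
 * (Re)build the path and map vectors: discover paths from sysfs and maps
 * from device-mapper, apply blacklist filtering, coalesce paths into maps
 * and push them to the kernel, then swap in the new map vector and
 * optionally start the event waiter threads.
 */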
1311 int
1312 configure (struct vectors * vecs, int start_waiters)
1313 {
1314         struct multipath * mpp;
1315         struct path * pp;
1316         vector mpvec;
1317         int i;
1318
1319         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1320                 return 1;
1321
1322         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1323                 return 1;
1324
1325         if (!(mpvec = vector_alloc()))
1326                 return 1;
1327
1328         /*
1329          * probe for current path (from sysfs) and map (from dm) sets
1330          */
1331         path_discovery(vecs->pathvec, conf, DI_ALL);
1332
1333         vector_foreach_slot (vecs->pathvec, pp, i){
1334                 if (filter_path(conf, pp) > 0){
1335                         vector_del_slot(vecs->pathvec, i);
1336                         free_path(pp);
1337                         i--;
1338                 }
1339                 else
1340                         pp->checkint = conf->checkint;
1341         }
1342         if (map_discovery(vecs))
1343                 return 1;
1344
1345         /*
1346          * create new set of maps & push changed ones into dm
1347          */
1348         if (coalesce_paths(vecs, mpvec, NULL, 1))
1349                 return 1;
1350
1351         /*
1352          * may need to remove some maps which are no longer relevant
1353          * e.g., due to blacklist changes in conf file
1354          */
1355         if (coalesce_maps(vecs, mpvec))
1356                 return 1;
1357
1358         dm_lib_release();
1359
1360         sync_maps_state(mpvec);
1361         vector_foreach_slot(mpvec, mpp, i){
1362                 remember_wwid(mpp->wwid);
1363                 update_map_pr(mpp);
1364         }
1365
1366         /*
1367          * purge dm of old maps
1368          */
1369         remove_maps(vecs);
1370
1371         /*
1372          * save new set of maps formed by considering current path state
1373          */
1374         vector_free(vecs->mpvec);
1375         vecs->mpvec = mpvec;
1376
1377         /*
1378          * start dm event waiter threads for these new maps
1379          */
1380         vector_foreach_slot(vecs->mpvec, mpp, i) {
1381                 if (setup_multipath(vecs, mpp))
1382                         return 1;
1383                 if (start_waiters)
1384                         if (start_waiter_thread(mpp, vecs))
1385                                 return 1;
1386         }
1387         return 0;
1388 }
1389
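/*
 * Reload the configuration file and rebuild all state: drop the current
 * maps, paths and configuration, then run configure() with the new
 * settings.
 */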
1390 int
1391 reconfigure (struct vectors * vecs)
1392 {
1393         struct config * old = conf;
1394         int retval = 1;
1395
1396         /*
1397          * free old map and path vectors ... they use old conf state
1398          */
1399         if (VECTOR_SIZE(vecs->mpvec))
1400                 remove_maps_and_stop_waiters(vecs);
1401
1402         if (VECTOR_SIZE(vecs->pathvec))
1403                 free_pathvec(vecs->pathvec, FREE_PATHS);
1404
1405         vecs->pathvec = NULL;
1406         conf = NULL;
1407
1408         if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1409                 conf->verbosity = old->verbosity;
1410                 conf->daemon = 1;
1411                 configure(vecs, 1);
1412                 free_config(old);
1413                 retval = 0;
1414         }
1415
1416         return retval;
1417 }
1418
1419 static struct vectors *
1420 init_vecs (void)
1421 {
1422         struct vectors * vecs;
1423
1424         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1425
1426         if (!vecs)
1427                 return NULL;
1428
1429         vecs->lock.mutex =
1430                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1431
1432         if (!vecs->lock.mutex)
1433                 goto out;
1434
1435         pthread_mutex_init(vecs->lock.mutex, NULL);
1436         vecs->lock.depth = 0;
1437
1438         return vecs;
1439
1440 out:
1441         FREE(vecs);
1442         condlog(0, "failed to init paths");
1443         return NULL;
1444 }
1445
1446 static void *
1447 signal_set(int signo, void (*func) (int))
1448 {
1449         int r;
1450         struct sigaction sig;
1451         struct sigaction osig;
1452
1453         sig.sa_handler = func;
1454         sigemptyset(&sig.sa_mask);
1455         sig.sa_flags = 0;
1456
1457         r = sigaction(signo, &sig, &osig);
1458
1459         if (r < 0)
1460                 return (SIG_ERR);
1461         else
1462                 return (osig.sa_handler);
1463 }
1464
1465 void
1466 handle_signals(void)
1467 {
1468         if (reconfig_sig && running_state == DAEMON_RUNNING) {
1469                 condlog(2, "reconfigure (signal)");
1470                 pthread_cleanup_push(cleanup_lock,
1471                                 &gvecs->lock);
1472                 lock(gvecs->lock);
1473                 pthread_testcancel();
1474                 reconfigure(gvecs);
1475                 lock_cleanup_pop(gvecs->lock);
1476         }
1477         if (log_reset_sig) {
1478                 condlog(2, "reset log (signal)");
1479                 pthread_mutex_lock(&logq_lock);
1480                 log_reset("multipathd");
1481                 pthread_mutex_unlock(&logq_lock);
1482         }
1483         reconfig_sig = 0;
1484         log_reset_sig = 0;
1485 }
1486
1487 static void
1488 sighup (int sig)
1489 {
1490         reconfig_sig = 1;
1491 }
1492
1493 static void
1494 sigend (int sig)
1495 {
1496         exit_daemon();
1497 }
1498
1499 static void
1500 sigusr1 (int sig)
1501 {
1502         log_reset_sig = 1;
1503 }
1504
1505 static void
1506 signal_init(void)
1507 {
1508         sigset_t set;
1509
1510         sigemptyset(&set);
1511         sigaddset(&set, SIGHUP);
1512         sigaddset(&set, SIGUSR1);
1513         pthread_sigmask(SIG_BLOCK, &set, NULL);
1514
1515         signal_set(SIGHUP, sighup);
1516         signal_set(SIGUSR1, sigusr1);
1517         signal_set(SIGINT, sigend);
1518         signal_set(SIGTERM, sigend);
1519         signal(SIGPIPE, SIG_IGN);
1520 }
1521
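/*
 * Request real-time round-robin scheduling (SCHED_RR, priority 99) for
 * the daemon.
 */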
1522 static void
1523 setscheduler (void)
1524 {
1525         int res;
1526         static struct sched_param sched_param = {
1527                 .sched_priority = 99
1528         };
1529
1530         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1531
1532         if (res == -1)
1533                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1534         return;
1535 }
1536
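/*
 * Exempt the daemon from the OOM killer by writing the minimum score to
 * /proc/self/oom_score_adj, falling back to the legacy /proc/self/oom_adj
 * interface when the former is not available.
 */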
1537 static void
1538 set_oom_adj (void)
1539 {
1540 #ifdef OOM_SCORE_ADJ_MIN
1541         int retry = 1;
1542         char *file = "/proc/self/oom_score_adj";
1543         int score = OOM_SCORE_ADJ_MIN;
1544 #else
1545         int retry = 0;
1546         char *file = "/proc/self/oom_adj";
1547         int score = OOM_ADJUST_MIN;
1548 #endif
1549         FILE *fp;
1550         struct stat st;
1551
1552         do {
1553                 if (stat(file, &st) == 0){
1554                         fp = fopen(file, "w");
1555                         if (!fp) {
1556                                 condlog(0, "couldn't fopen %s : %s", file,
1557                                         strerror(errno));
1558                                 return;
1559                         }
1560                         fprintf(fp, "%i", score);
1561                         fclose(fp);
1562                         return;
1563                 }
1564                 if (errno != ENOENT) {
1565                         condlog(0, "couldn't stat %s : %s", file,
1566                                 strerror(errno));
1567                         return;
1568                 }
1569 #ifdef OOM_ADJUST_MIN
1570                 file = "/proc/self/oom_adj";
1571                 score = OOM_ADJUST_MIN;
1572 #else
1573                 retry = 0;
1574 #endif
1575         } while (retry--);
1576         condlog(0, "couldn't adjust oom score");
1577 }
1578
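/*
 * Daemon main body: load the configuration, initialize checkers and
 * prioritizers, spawn the uevent listener, CLI listener, path checker and
 * uevent dispatcher threads, write the pid file, then wait on exit_sem
 * and tear everything down on shutdown.
 */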
1579 static int
1580 child (void * param)
1581 {
1582         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1583         pthread_attr_t log_attr, misc_attr, uevent_attr;
1584         struct vectors * vecs;
1585         struct multipath * mpp;
1586         int i;
1587         int rc, pid_rc;
1588
1589         mlockall(MCL_CURRENT | MCL_FUTURE);
1590         sem_init(&exit_sem, 0, 0);
1591         signal_init();
1592
1593         udev = udev_new();
1594
1595         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1596         setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1597         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1598
1599         if (logsink) {
1600                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1601                 log_thread_start(&log_attr);
1602                 pthread_attr_destroy(&log_attr);
1603         }
1604
1605         running_state = DAEMON_START;
1606
1607         condlog(2, "--------start up--------");
1608         condlog(2, "read " DEFAULT_CONFIGFILE);
1609
1610         if (load_config(DEFAULT_CONFIGFILE, udev))
1611                 exit(1);
1612
1613         if (init_checkers()) {
1614                 condlog(0, "failed to initialize checkers");
1615                 exit(1);
1616         }
1617         if (init_prio()) {
1618                 condlog(0, "failed to initialize prioritizers");
1619                 exit(1);
1620         }
1621
1622         setlogmask(LOG_UPTO(conf->verbosity + 3));
1623
1624         if (conf->max_fds) {
1625                 struct rlimit fd_limit;
1626
1627                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1628                         condlog(0, "can't get open fds limit: %s",
1629                                 strerror(errno));
1630                         fd_limit.rlim_cur = 0;
1631                         fd_limit.rlim_max = 0;
1632                 }
1633                 if (fd_limit.rlim_cur < conf->max_fds) {
1634                         fd_limit.rlim_cur = conf->max_fds;
1635                         if (fd_limit.rlim_max < conf->max_fds)
1636                                 fd_limit.rlim_max = conf->max_fds;
1637                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1638                                 condlog(0, "can't set open fds limit to "
1639                                         "%lu/%lu : %s",
1640                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1641                                         strerror(errno));
1642                         } else {
1643                                 condlog(3, "set open fds limit to %lu/%lu",
1644                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1645                         }
1646                 }
1647
1648         }
1649
1650         vecs = gvecs = init_vecs();
1651         if (!vecs)
1652                 exit(1);
1653
1654         setscheduler();
1655         set_oom_adj();
1656
1657         conf->daemon = 1;
1658         udev_set_sync_support(0);
1659         /*
1660          * Start uevent listener early to catch events
1661          */
1662         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1663                 condlog(0, "failed to create uevent thread: %d", rc);
1664                 exit(1);
1665         }
1666         pthread_attr_destroy(&uevent_attr);
1667         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1668                 condlog(0, "failed to create cli listener: %d", rc);
1669                 exit(1);
1670         }
1671         /*
1672          * fetch and configure both paths and multipaths
1673          */
1674         running_state = DAEMON_CONFIGURE;
1675
1676         lock(vecs->lock);
1677         if (configure(vecs, 1)) {
1678                 unlock(vecs->lock);
1679                 condlog(0, "failure during configuration");
1680                 exit(1);
1681         }
1682         unlock(vecs->lock);
1683
1684         /*
1685          * start threads
1686          */
1687         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1688                 condlog(0,"failed to create checker loop thread: %d", rc);
1689                 exit(1);
1690         }
1691         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1692                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1693                 exit(1);
1694         }
1695         pthread_attr_destroy(&misc_attr);
1696
1697         /* Startup complete, create pidfile */
1698         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1699         /* Ignore errors, we can live without */
1700
1701         running_state = DAEMON_RUNNING;
1702
1703         /*
1704          * exit path: wait for the exit semaphore to be posted, retrying
1705          * if interrupted by a signal
1706          */
1707         while (sem_wait(&exit_sem) != 0); /* Do nothing */
1707         running_state = DAEMON_SHUTDOWN;
1708         lock(vecs->lock);
1709         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1710                 vector_foreach_slot(vecs->mpvec, mpp, i)
1711                         dm_queue_if_no_path(mpp->alias, 0);
1712         remove_maps_and_stop_waiters(vecs);
1713         unlock(vecs->lock);
1714
1715         pthread_cancel(check_thr);
1716         pthread_cancel(uevent_thr);
1717         pthread_cancel(uxlsnr_thr);
1718         pthread_cancel(uevq_thr);
1719
1720         lock(vecs->lock);
1721         free_pathvec(vecs->pathvec, FREE_PATHS);
1722         vecs->pathvec = NULL;
1723         unlock(vecs->lock);
1724         /* Now all the waitevent threads will start rushing in. */
1725         while (vecs->lock.depth > 0) {
1726                 sleep (1); /* This is weak. */
1727                 condlog(3, "Have %d waitevent checker threads to de-alloc,"
1728                         " waiting...", vecs->lock.depth);
1729         }
1730         pthread_mutex_destroy(vecs->lock.mutex);
1731         FREE(vecs->lock.mutex);
1732         vecs->lock.depth = 0;
1733         vecs->lock.mutex = NULL;
1734         FREE(vecs);
1735         vecs = NULL;
1736
1737         cleanup_checkers();
1738         cleanup_prio();
1739
1740         dm_lib_release();
1741         dm_lib_exit();
1742
1743         /* We're done here */
1744         if (!pid_rc) {
1745                 condlog(3, "unlink pidfile");
1746                 unlink(DEFAULT_PIDFILE);
1747         }
1748
1749         condlog(2, "--------shut down-------");
1750
1751         if (logsink)
1752                 log_thread_stop();
1753
1754         /*
1755          * Freeing config must be done after condlog() and dm_lib_exit(),
1756          * because logging functions like dlog() and dm_write_log()
1757          * reference the config.
1758          */
1759         free_config(conf);
1760         conf = NULL;
1761         udev_unref(udev);
1762         udev = NULL;
1763 #ifdef _DEBUG_
1764         dbg_free_final(NULL);
1765 #endif
1766
1767         exit(0);
1768 }
1769
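/*
 * Classic double-fork daemonization: fork and let the parent return, start a
 * new session, fork again so the daemon cannot reacquire a controlling
 * terminal, chdir to / and redirect stdin/stdout/stderr to /dev/null.
 */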
1770 static int
1771 daemonize(void)
1772 {
1773         int pid;
1774         int dev_null_fd;
1775
1776         if( (pid = fork()) < 0){
1777                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1778                 return -1;
1779         }
1780         else if (pid != 0)
1781                 return pid;
1782
1783         setsid();
1784
1785         if ( (pid = fork()) < 0)
1786                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1787         else if (pid != 0)
1788                 _exit(0);
1789
1790         if (chdir("/") < 0)
1791                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1792
1793         dev_null_fd = open("/dev/null", O_RDWR);
1794         if (dev_null_fd < 0){
1795                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1796                         strerror(errno));
1797                 _exit(0);
1798         }
1799
1800         close(STDIN_FILENO);
1801         if (dup(dev_null_fd) < 0) {
1802                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1803                         strerror(errno));
1804                 _exit(0);
1805         }
1806         close(STDOUT_FILENO);
1807         if (dup(dev_null_fd) < 0) {
1808                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1809                         strerror(errno));
1810                 _exit(0);
1811         }
1812         close(STDERR_FILENO);
1813         if (dup(dev_null_fd) < 0) {
1814                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1815                         strerror(errno));
1816                 _exit(0);
1817         }
1818         close(dev_null_fd);
1819         daemon_pid = getpid();
1820         return 0;
1821 }
1822
1823 int
1824 main (int argc, char *argv[])
1825 {
1826         extern char *optarg;
1827         extern int optind;
1828         int arg;
1829         int err;
1830
1831         logsink = 1;
1832         running_state = DAEMON_INIT;
1833         dm_init();
1834
1835         if (getuid() != 0) {
1836                 fprintf(stderr, "need to be root\n");
1837                 exit(1);
1838         }
1839
1840         /* make sure we don't lock any path */
1841         if (chdir("/") < 0)
1842                 fprintf(stderr, "can't chdir to root directory : %s\n",
1843                         strerror(errno));
1844         umask(umask(077) | 022);
1845
1846         conf = alloc_config();
1847
1848         if (!conf)
1849                 exit(1);
1850
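        /*
         * Command line options: -d stays in the foreground and logs to
         * stdout instead of syslog, -v sets the verbosity level, -k talks
         * to a running daemon through the ux socket client.
         */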
1851         while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1852         switch(arg) {
1853                 case 'd':
1854                         logsink = 0;
1855                         //debug=1; /* ### comment me out ### */
1856                         break;
1857                 case 'v':
1858                         /* the verbosity argument must be numeric */
1859                         if (!isdigit(optarg[0]))
1860                                 exit(1);
1861
1862                         conf->verbosity = atoi(optarg);
1863                         break;
1864                 case 'k':
1865                         uxclnt(optarg);
1866                         exit(0);
1867                 default:
1868                         ;
1869                 }
1870         }
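        /*
         * Any remaining arguments are quoted as needed, joined into a single
         * command string and handed to the ux socket client.
         */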
1871         if (optind < argc) {
1872                 char cmd[CMDSIZE];
1873                 char * s = cmd;
1874                 char * c = s;
1875
1876                 while (optind < argc) {
1877                         if (strchr(argv[optind], ' '))
1878                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1879                         else
1880                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1881                         optind++;
1882                 }
1883                 c += snprintf(c, s + CMDSIZE - c, "\n");
1884                 uxclnt(s);
1885                 exit(0);
1886         }
1887
1888         if (!logsink)
1889                 err = 0;
1890         else
1891                 err = daemonize();
1892
1893         if (err < 0)
1894                 /* error */
1895                 exit(1);
1896         else if (err > 0)
1897                 /* parent dies */
1898                 exit(0);
1899         else
1900                 /* child lives */
1901                 return (child(NULL));
1902 }
1903
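/*
 * Persistent-reservation helper for a path: read the keys registered on the
 * device (PR IN READ KEYS) and, if the map's reservation_key is among them,
 * re-register that key through this path with the REGISTER AND IGNORE
 * EXISTING KEY service action.
 */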
1904 void * mpath_pr_event_handler_fn (void * pathp)
1905 {
1906         struct multipath * mpp;
1907         int i,j, ret, isFound;
1908         struct path * pp = (struct path *)pathp;
1909         unsigned char *keyp;
1910         uint64_t prkey;
1911         struct prout_param_descriptor *param;
1912         struct prin_resp *resp;
1913
1914         mpp = pp->mpp;
1915
1916         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1917         if (!resp){
1918                 condlog(0,"%s Alloc failed for prin response", pp->dev);
1919                 return NULL;
1920         }
1921
1922         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1923         if (ret != MPATH_PR_SUCCESS )
1924         {
1925                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1926                 goto out;
1927         }
1928
1929         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
1930                         resp->prin_descriptor.prin_readkeys.additional_length);
1931
1932         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1933         {
1934                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1935                 ret = MPATH_PR_SUCCESS;
1936                 goto out;
1937         }
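        /* fold the 8-byte reservation key into a 64-bit value, MSB first */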
1938         prkey = 0;
1939         keyp = (unsigned char *)mpp->reservation_key;
1940         for (j = 0; j < 8; ++j) {
1941                 if (j > 0)
1942                         prkey <<= 8;
1943                 prkey |= *keyp;
1944                 ++keyp;
1945         }
1946         condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);
1947
1948         isFound = 0;
1949         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++)
1950         {
1951                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
1952                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8, -1);
1953                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1954                 {
1955                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1956                         isFound = 1;
1957                         break;
1958                 }
1959         }
1960         if (!isFound)
1961         {
1962                 condlog(0, "%s: device is not registered, or the host is not"
1963                         " authorized to register; skipping path", pp->dev);
1964                 ret = MPATH_PR_OTHER;
1965                 goto out;
1966         }
1967
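        /* build the PR OUT parameter block carrying the reservation key */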
1968         param = malloc(sizeof(struct prout_param_descriptor));
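        /* malloc can fail; bail out before the memset below touches NULL */
        if (!param) {
                condlog(0, "%s: failed to allocate prout parameter descriptor",
                        pp->dev);
                ret = MPATH_PR_OTHER;
                goto out;
        }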
1969         memset(param, 0, sizeof(struct prout_param_descriptor));
1970
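        /* store the key back as a big-endian byte array in sa_key */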
1971         for (j = 7; j >= 0; --j) {
1972                 param->sa_key[j] = (prkey & 0xff);
1973                 prkey >>= 8;
1974         }
1975         param->num_transportid = 0;
1976
1977         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1978
1979         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1980         if (ret != MPATH_PR_SUCCESS )
1981         {
1982                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
1983         }
1984         mpp->prflag = 1;
1985
1986         free(param);
1987 out:
1988         free(resp);
1989         return NULL;
1990 }
1991
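/*
 * Start PR key re-registration for a path. The handler runs in a separate
 * thread but is joined immediately below, so the call is effectively
 * synchronous; it is skipped when the map has no reservation_key configured.
 */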
1992 int mpath_pr_event_handle(struct path *pp)
1993 {
1994         pthread_t thread;
1995         int rc;
1996         pthread_attr_t attr;
1997         struct multipath * mpp;
1998
1999         mpp = pp->mpp;
2000
2001         if (!mpp->reservation_key)
2002                 return -1;
2003
2004         pthread_attr_init(&attr);
2005         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2006
2007         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2008         if (rc) {
2009                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2010                 return -1;
2011         }
2012         pthread_attr_destroy(&attr);
2013         rc = pthread_join(thread, NULL);
2014         return 0;
2015 }
2016