make multipathd disable queue_without_daemon by default
multipathd/main.c
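The commit subject refers to the queue_without_daemon option. As a hedged illustration (assuming the option keeps its documented multipath.conf spelling), the new default behaves as if the following were configured unless the administrator overrides it:

    defaults {
            queue_without_daemon no
    }

With the option off, the shutdown path in child() below turns queue_if_no_path off on every map before exiting, so I/O to pathless maps fails rather than queueing forever while no daemon is running.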
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <mpath_persist.h>
21
22 /*
23  * libcheckers
24  */
25 #include <checkers.h>
26
27 /*
28  * libmultipath
29  */
30 #include <parser.h>
31 #include <vector.h>
32 #include <memory.h>
33 #include <config.h>
34 #include <util.h>
35 #include <hwtable.h>
36 #include <defaults.h>
37 #include <structs.h>
38 #include <blacklist.h>
39 #include <structs_vec.h>
40 #include <dmparser.h>
41 #include <devmapper.h>
42 #include <sysfs.h>
43 #include <dict.h>
44 #include <discovery.h>
45 #include <debug.h>
46 #include <propsel.h>
47 #include <uevent.h>
48 #include <switchgroup.h>
49 #include <print.h>
50 #include <configure.h>
51 #include <prio.h>
52 #include <pgpolicies.h>
53 #include <uevent.h>
54
55 #include "main.h"
56 #include "pidfile.h"
57 #include "uxlsnr.h"
58 #include "uxclnt.h"
59 #include "cli.h"
60 #include "cli_handlers.h"
61 #include "lock.h"
62 #include "waiter.h"
63
64 #define FILE_NAME_SIZE 256
65 #define CMDSIZE 160
66
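/* NOTE: LOG_MSG() expects a "struct path *pp" (with pp->mpp set) to be in scope at the call site. */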
67 #define LOG_MSG(a, b) \
68 do { \
69         if (pp->offline) \
70                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
71         else if (strlen(b)) \
72                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
73 } while(0)
74
75 struct mpath_event_param
76 {
77         char * devname;
78         struct multipath *mpp;
79 };
80
81 unsigned int mpath_mx_alloc_len;
82
83 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
84 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85
86 int logsink;
87 enum daemon_status running_state;
88 pid_t daemon_pid;
89
90 /*
91  * global copy of vecs for use in sig handlers
92  */
93 struct vectors * gvecs;
94
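/*
 * Return 1 if the map's best path group (optionally after refreshing path
 * priorities) differs from the currently selected one, 0 otherwise.
 */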
95 static int
96 need_switch_pathgroup (struct multipath * mpp, int refresh)
97 {
98         struct pathgroup * pgp;
99         struct path * pp;
100         unsigned int i, j;
101
102         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
103                 return 0;
104
105         /*
106          * Refresh path priority values
107          */
108         if (refresh)
109                 vector_foreach_slot (mpp->pg, pgp, i)
110                         vector_foreach_slot (pgp->paths, pp, j)
111                                 pathinfo(pp, conf->hwtable, DI_PRIO);
112
113         mpp->bestpg = select_path_group(mpp);
114
115         if (mpp->bestpg != mpp->nextpg)
116                 return 1;
117
118         return 0;
119 }
120
121 static void
122 switch_pathgroup (struct multipath * mpp)
123 {
124         mpp->stat_switchgroup++;
125         dm_switchgroup(mpp->alias, mpp->bestpg);
126         condlog(2, "%s: switch to path group #%i",
127                  mpp->alias, mpp->bestpg);
128 }
129
130 static int
131 coalesce_maps(struct vectors *vecs, vector nmpv)
132 {
133         struct multipath * ompp;
134         vector ompv = vecs->mpvec;
135         unsigned int i;
136         int j;
137
138         vector_foreach_slot (ompv, ompp, i) {
139                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
140                         /*
141                          * remove all current maps not allowed by the
142                          * current configuration
143                          */
144                         if (dm_flush_map(ompp->alias)) {
145                                 condlog(0, "%s: unable to flush devmap",
146                                         ompp->alias);
147                                 /*
148                                  * may be just because the device is open
149                                  */
150                                 if (!vector_alloc_slot(nmpv))
151                                         return 1;
152
153                                 vector_set_slot(nmpv, ompp);
154                                 setup_multipath(vecs, ompp);
155
156                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
157                                         vector_del_slot(ompv, j);
158
159                                 continue;
160                         }
161                         else {
162                                 dm_lib_release();
163                                 condlog(2, "%s devmap removed", ompp->alias);
164                         }
165                 } else if (conf->reassign_maps) {
166                         condlog(3, "%s: Reassign existing device-mapper"
167                                 " devices", ompp->alias);
168                         dm_reassign(ompp->alias);
169                 }
170         }
171         return 0;
172 }
173
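/*
 * Reconcile checker state with the kernel map: reinstate paths the checker
 * sees as usable but device-mapper has failed, and fail paths the checker
 * sees as down but device-mapper still has active.
 */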
174 void
175 sync_map_state(struct multipath *mpp)
176 {
177         struct pathgroup *pgp;
178         struct path *pp;
179         unsigned int i, j;
180
181         if (!mpp->pg)
182                 return;
183
184         vector_foreach_slot (mpp->pg, pgp, i){
185                 vector_foreach_slot (pgp->paths, pp, j){
186                         if (pp->state == PATH_UNCHECKED || 
187                             pp->state == PATH_WILD)
188                                 continue;
189                         if ((pp->dmstate == PSTATE_FAILED ||
190                              pp->dmstate == PSTATE_UNDEF) &&
191                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
192                                 dm_reinstate_path(mpp->alias, pp->dev_t);
193                         else if ((pp->dmstate == PSTATE_ACTIVE ||
194                                   pp->dmstate == PSTATE_UNDEF) &&
195                                  (pp->state == PATH_DOWN ||
196                                   pp->state == PATH_SHAKY))
197                                 dm_fail_path(mpp->alias, pp->dev_t);
198                 }
199         }
200 }
201
202 static void
203 sync_maps_state(vector mpvec)
204 {
205         unsigned int i;
206         struct multipath *mpp;
207
208         vector_foreach_slot (mpvec, mpp, i)
209                 sync_map_state(mpp);
210 }
211
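/*
 * Remove the map from device-mapper and drop our bookkeeping for it
 * (orphan its paths, stop its event waiter). Returns 1 if the flush
 * failed, e.g. because the device is still open.
 */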
212 static int
213 flush_map(struct multipath * mpp, struct vectors * vecs)
214 {
215         /*
216          * clear references to this map before flushing so we can ignore
217          * the spurious uevent we may generate with the dm_flush_map call below
218          */
219         if (dm_flush_map(mpp->alias)) {
220                 /*
221                  * May not really be an error -- if the map was already flushed
222                  * from the device mapper by dmsetup(8) for instance.
223                  */
224                 condlog(0, "%s: can't flush", mpp->alias);
225                 return 1;
226         }
227         else {
228                 dm_lib_release();
229                 condlog(2, "%s: devmap removed", mpp->alias);
230         }
231
232         orphan_paths(vecs->pathvec, mpp);
233         remove_map_and_stop_waiter(mpp, vecs, 1);
234
235         return 0;
236 }
237
238 static int
239 uev_add_map (struct uevent * uev, struct vectors * vecs)
240 {
241         char *alias;
242         int major = -1, minor = -1, rc;
243
244         condlog(3, "%s: add map (uevent)", uev->kernel);
245         alias = uevent_get_dm_name(uev);
246         if (!alias) {
247                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
248                 major = uevent_get_major(uev);
249                 minor = uevent_get_minor(uev);
250                 alias = dm_mapname(major, minor);
251                 if (!alias) {
252                         condlog(2, "%s: mapname not found for %d:%d",
253                                 uev->kernel, major, minor);
254                         return 1;
255                 }
256         }
257         rc = ev_add_map(uev->kernel, alias, vecs);
258         FREE(alias);
259         return rc;
260 }
261
262 int
263 ev_add_map (char * dev, char * alias, struct vectors * vecs)
264 {
265         char * refwwid;
266         struct multipath * mpp;
267         int map_present;
268         int r = 1;
269
270         map_present = dm_map_present(alias);
271
272         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
273                 condlog(4, "%s: not a multipath map", alias);
274                 return 0;
275         }
276
277         mpp = find_mp_by_alias(vecs->mpvec, alias);
278
279         if (mpp) {
280                 /*
281                  * Not really an error -- we generate our own uevent
282                  * if we create a multipath mapped device as a result
283                  * of uev_add_path
284                  */
285                 if (conf->reassign_maps) {
286                         condlog(3, "%s: Reassign existing device-mapper devices",
287                                 alias);
288                         dm_reassign(alias);
289                 }
290                 return 0;
291         }
292         condlog(2, "%s: adding map", alias);
293
294         /*
295          * now we can register the map
296          */
297         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
298                 sync_map_state(mpp);
299                 condlog(2, "%s: devmap %s registered", alias, dev);
300                 return 0;
301         }
302         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
303
304         if (refwwid) {
305                 r = coalesce_paths(vecs, NULL, refwwid, 0);
306                 dm_lib_release();
307         }
308
309         if (!r)
310                 condlog(2, "%s: devmap %s added", alias, dev);
311         else if (r == 2)
312                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
313         else
314                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
315
316         FREE(refwwid);
317         return r;
318 }
319
320 static int
321 uev_remove_map (struct uevent * uev, struct vectors * vecs)
322 {
323         char *alias;
324         int minor;
325         struct multipath *mpp;
326
327         condlog(2, "%s: remove map (uevent)", uev->kernel);
328         alias = uevent_get_dm_name(uev);
329         if (!alias) {
330                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
331                 return 0;
332         }
333         minor = uevent_get_minor(uev);
334         mpp = find_mp_by_minor(vecs->mpvec, minor);
335
336         if (!mpp) {
337                 condlog(2, "%s: devmap not registered, can't remove",
338                         uev->kernel);
339                 goto out;
340         }
341         if (strcmp(mpp->alias, alias)) {
342                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
343                         uev->kernel, mpp->alias, alias);
344                 goto out;
345         }
346
347         orphan_paths(vecs->pathvec, mpp);
348         remove_map_and_stop_waiter(mpp, vecs, 1);
349 out:
350         FREE(alias);
351         return 0;
352 }
353
354 int
355 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
356 {
357         struct multipath * mpp;
358
359         mpp = find_mp_by_minor(vecs->mpvec, minor);
360
361         if (!mpp) {
362                 condlog(2, "%s: devmap not registered, can't remove",
363                         devname);
364                 return 0;
365         }
366         if (strcmp(mpp->alias, alias)) {
367                 condlog(2, "%s: map alias mismatch: have \"%s\", got \"%s\"",
368                         devname, mpp->alias, alias);
369                 return 0;
370         }
371         return flush_map(mpp, vecs);
372 }
373
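/*
 * uevent handler for path addition: reuse an existing pathvec entry when
 * possible (re-probing it if its wwid is still unknown), otherwise discover
 * and store the new path, then hand it over to ev_add_path().
 */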
374 static int
375 uev_add_path (struct uevent *uev, struct vectors * vecs)
376 {
377         struct path *pp;
378         int ret, i;
379
380         condlog(2, "%s: add path (uevent)", uev->kernel);
381         if (strstr(uev->kernel, "..") != NULL) {
382                 /*
383                  * Don't allow relative device names in the pathvec
384                  */
385                 condlog(0, "%s: path name is invalid", uev->kernel);
386                 return 1;
387         }
388
389         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
390         if (pp) {
391                 condlog(0, "%s: spurious uevent, path already in pathvec",
392                         uev->kernel);
393                 if (pp->mpp)
394                         return 0;
395                 if (!strlen(pp->wwid)) {
396                         udev_device_unref(pp->udev);
397                         pp->udev = udev_device_ref(uev->udev);
398                         ret = pathinfo(pp, conf->hwtable,
399                                        DI_ALL | DI_BLACKLIST);
400                         if (ret == 2) {
401                                 i = find_slot(vecs->pathvec, (void *)pp);
402                                 if (i != -1)
403                                         vector_del_slot(vecs->pathvec, i);
404                                 free_path(pp);
405                                 return 0;
406                         } else if (ret == 1) {
407                                 condlog(0, "%s: failed to reinitialize path",
408                                         uev->kernel);
409                                 return 1;
410                         }
411                 }
412         } else {
413                 /*
414                  * get path vital state
415                  */
416                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
417                                      uev->udev, DI_ALL, &pp);
418                 if (!pp) {
419                         if (ret == 2)
420                                 return 0;
421                         condlog(0, "%s: failed to store path info",
422                                 uev->kernel);
423                         return 1;
424                 }
425                 pp->checkint = conf->checkint;
426         }
427
428         return ev_add_path(pp, vecs);
429 }
430
431 /*
432  * returns:
433  * 0: added
434  * 1: error
435  */
436 int
437 ev_add_path (struct path * pp, struct vectors * vecs)
438 {
439         struct multipath * mpp;
440         char empty_buff[WWID_SIZE] = {0};
441         char params[PARAMS_SIZE] = {0};
442         int retries = 3;
443         int start_waiter = 0;
444
445         /*
446          * need path UID to go any further
447          */
448         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
449                 condlog(0, "%s: failed to get path uid", pp->dev);
450                 goto fail; /* leave path added to pathvec */
451         }
452         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
453 rescan:
454         if (mpp) {
455                 if ((!pp->size) || (mpp->size != pp->size)) {
456                         if (!pp->size)
457                                 condlog(0, "%s: failed to add new path %s, "
458                                         "device size is 0",
459                                         mpp->alias, pp->dev);
460                         else
461                                 condlog(0, "%s: failed to add new path %s, "
462                                         "device size mismatch",
463                                         mpp->alias, pp->dev);
464                         int i = find_slot(vecs->pathvec, (void *)pp);
465                         if (i != -1)
466                                 vector_del_slot(vecs->pathvec, i);
467                         free_path(pp);
468                         return 1;
469                 }
470
471                 condlog(4,"%s: adopting all paths for path %s",
472                         mpp->alias, pp->dev);
473                 if (adopt_paths(vecs->pathvec, mpp, 1))
474                         goto fail; /* leave path added to pathvec */
475
476                 verify_paths(mpp, vecs, NULL);
477                 mpp->flush_on_last_del = FLUSH_UNDEF;
478                 mpp->action = ACT_RELOAD;
479         }
480         else {
481                 if (!pp->size) {
482                         condlog(0, "%s: failed to create new map,"
483                                 " device size is 0 ", pp->dev);
484                         int i = find_slot(vecs->pathvec, (void *)pp);
485                         if (i != -1)
486                                 vector_del_slot(vecs->pathvec, i);
487                         free_path(pp);
488                         return 1;
489                 }
490
491                 condlog(4,"%s: creating new map", pp->dev);
492                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
493                         mpp->action = ACT_CREATE;
494                         /*
495                          * We don't depend on ACT_CREATE, as domap will
496                          * set it to ACT_NOTHING when complete.
497                          */
498                         start_waiter = 1;
499                 }
500                 else
501                         goto fail; /* leave path added to pathvec */
502         }
503
504         /* persistent reservation check */
505         mpath_pr_event_handle(pp);
506
507         /*
508          * push the map to the device-mapper
509          */
510         if (setup_map(mpp, params, PARAMS_SIZE)) {
511                 condlog(0, "%s: failed to setup map for addition of new "
512                         "path %s", mpp->alias, pp->dev);
513                 goto fail_map;
514         }
515         /*
516          * reload the map for the multipath mapped device
517          */
518         if (domap(mpp, params) <= 0) {
519                 condlog(0, "%s: failed in domap for addition of new "
520                         "path %s", mpp->alias, pp->dev);
521                 /*
522                  * deal with asynchronous uevents :((
523                  */
524                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
525                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
526                         sleep(1);
527                         update_mpp_paths(mpp, vecs->pathvec);
528                         goto rescan;
529                 }
530                 else if (mpp->action == ACT_RELOAD)
531                         condlog(0, "%s: giving up reload", mpp->alias);
532                 else
533                         goto fail_map;
534         }
535         dm_lib_release();
536
537         /*
538          * update our state from kernel regardless of create or reload
539          */
540         if (setup_multipath(vecs, mpp))
541                 goto fail; /* if setup_multipath fails, it removes the map */
542
543         sync_map_state(mpp);
544
545         if ((mpp->action == ACT_CREATE ||
546              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
547             start_waiter_thread(mpp, vecs))
548                         goto fail_map;
549
550         if (retries >= 0) {
551                 condlog(2, "%s [%s]: path added to devmap %s",
552                         pp->dev, pp->dev_t, mpp->alias);
553                 return 0;
554         }
555         else
556                 return 1;
557
558 fail_map:
559         remove_map(mpp, vecs, 1);
560 fail:
561         orphan_path(pp);
562         return 1;
563 }
564
565 static int
566 uev_remove_path (struct uevent *uev, struct vectors * vecs)
567 {
568         struct path *pp;
569
570         condlog(2, "%s: remove path (uevent)", uev->kernel);
571         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
572
573         if (!pp) {
574                 /* Not an error; path might have been purged earlier */
575                 condlog(0, "%s: path already removed", uev->kernel);
576                 return 0;
577         }
578
579         return ev_remove_path(pp, vecs);
580 }
581
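/*
 * Remove a path from its map. If it was the last path, optionally disable
 * queueing (flush_on_last_del) and try to flush the whole map; otherwise
 * reload the map without the removed path.
 */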
582 int
583 ev_remove_path (struct path *pp, struct vectors * vecs)
584 {
585         struct multipath * mpp;
586         int i, retval = 0;
587         char params[PARAMS_SIZE] = {0};
588
589         /*
590          * avoid referring to the map of an orphaned path
591          */
592         if ((mpp = pp->mpp)) {
593                 /*
594                  * transform the mp->pg vector of vectors of paths
595                  * into a mp->params string to feed the device-mapper
596                  */
597                 if (update_mpp_paths(mpp, vecs->pathvec)) {
598                         condlog(0, "%s: failed to update paths",
599                                 mpp->alias);
600                         goto fail;
601                 }
602                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
603                         vector_del_slot(mpp->paths, i);
604
605                 /*
606                  * remove the map IFF removing the last path
607                  */
608                 if (VECTOR_SIZE(mpp->paths) == 0) {
609                         char alias[WWID_SIZE];
610
611                         /*
612                          * flush_map will fail if the device is open
613                          */
614                         strncpy(alias, mpp->alias, WWID_SIZE);
615                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
616                                 condlog(2, "%s: Last path deleted, disabling queueing", mpp->alias);
617                                 mpp->retry_tick = 0;
618                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
619                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
620                                 dm_queue_if_no_path(mpp->alias, 0);
621                         }
622                         if (!flush_map(mpp, vecs)) {
623                                 condlog(2, "%s: removed map after"
624                                         " removing all paths",
625                                         alias);
626                                 retval = 0;
627                                 goto out;
628                         }
629                         /*
630                          * Not an error, continue
631                          */
632                 }
633
634                 if (setup_map(mpp, params, PARAMS_SIZE)) {
635                         condlog(0, "%s: failed to setup map for"
636                                 " removal of path %s", mpp->alias, pp->dev);
637                         goto fail;
638                 }
639                 /*
640                  * reload the map
641                  */
642                 mpp->action = ACT_RELOAD;
643                 if (domap(mpp, params) <= 0) {
644                         condlog(0, "%s: failed in domap for "
645                                 "removal of path %s",
646                                 mpp->alias, pp->dev);
647                         retval = 1;
648                 } else {
649                         /*
650                          * update our state from kernel
651                          */
652                         if (setup_multipath(vecs, mpp)) {
653                                 goto fail;
654                         }
655                         sync_map_state(mpp);
656
657                         condlog(2, "%s [%s]: path removed from map %s",
658                                 pp->dev, pp->dev_t, mpp->alias);
659                 }
660         }
661
662 out:
663         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
664                 vector_del_slot(vecs->pathvec, i);
665
666         free_path(pp);
667
668         return retval;
669
670 fail:
671         remove_map_and_stop_waiter(mpp, vecs, 1);
672         return 1;
673 }
674
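/*
 * change uevents are only used here to pick up read-only/read-write
 * transitions; the map is reloaded so device-mapper sees the new
 * write_protect setting.
 */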
675 static int
676 uev_update_path (struct uevent *uev, struct vectors * vecs)
677 {
678         int ro, retval = 0;
679
680         ro = uevent_get_disk_ro(uev);
681
682         if (ro >= 0) {
683                 struct path * pp;
684
685                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
686                         uev->kernel, ro);
687                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
688                 if (!pp) {
689                         condlog(0, "%s: spurious uevent, path not found",
690                                 uev->kernel);
691                         return 1;
692                 }
693                 if (pp->mpp) {
694                         retval = reload_map(vecs, pp->mpp, 0);
695
696                         condlog(2, "%s: map %s reloaded (retval %d)",
697                                 uev->kernel, pp->mpp->alias, retval);
698                 }
699
700         }
701
702         return retval;
703 }
704
705 static int
706 map_discovery (struct vectors * vecs)
707 {
708         struct multipath * mpp;
709         unsigned int i;
710
711         if (dm_get_maps(vecs->mpvec))
712                 return 1;
713
714         vector_foreach_slot (vecs->mpvec, mpp, i)
715                 if (setup_multipath(vecs, mpp))
716                         return 1;
717
718         return 0;
719 }
720
721 int
722 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
723 {
724         struct vectors * vecs;
725         int r;
726
727         *reply = NULL;
728         *len = 0;
729         vecs = (struct vectors *)trigger_data;
730
731         pthread_cleanup_push(cleanup_lock, &vecs->lock);
732         lock(vecs->lock);
733         pthread_testcancel();
734
735         r = parse_cmd(str, reply, len, vecs);
736
737         if (r > 0) {
738                 *reply = STRDUP("fail\n");
739                 *len = strlen(*reply) + 1;
740                 r = 1;
741         }
742         else if (!r && *len == 0) {
743                 *reply = STRDUP("ok\n");
744                 *len = strlen(*reply) + 1;
745                 r = 0;
746         }
747         /* else if (r < 0) leave *reply alone */
748
749         lock_cleanup_pop(vecs->lock);
750         return r;
751 }
752
753 static int
754 uev_discard(char * devpath)
755 {
756         char *tmp;
757         char a[11], b[11];
758
759         /*
760          * keep only block devices, discard partitions
761          */
762         tmp = strstr(devpath, "/block/");
763         if (tmp == NULL){
764                 condlog(4, "no /block/ in '%s'", devpath);
765                 return 1;
766         }
767         if (sscanf(tmp, "/block/%10s", a) != 1 ||
768             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
769                 condlog(4, "discard event on %s", devpath);
770                 return 1;
771         }
772         return 0;
773 }
774
775 int
776 uev_trigger (struct uevent * uev, void * trigger_data)
777 {
778         int r = 0;
779         struct vectors * vecs;
780
781         vecs = (struct vectors *)trigger_data;
782
783         if (uev_discard(uev->devpath))
784                 return 0;
785
786         pthread_cleanup_push(cleanup_lock, &vecs->lock);
787         lock(vecs->lock);
788         pthread_testcancel();
789
790         /*
791          * device map event
792          * Add events are ignored here as the tables
793          * are not fully initialised then.
794          */
795         if (!strncmp(uev->kernel, "dm-", 3)) {
796                 if (!strncmp(uev->action, "change", 6)) {
797                         r = uev_add_map(uev, vecs);
798                         goto out;
799                 }
800                 if (!strncmp(uev->action, "remove", 6)) {
801                         r = uev_remove_map(uev, vecs);
802                         goto out;
803                 }
804                 goto out;
805         }
806
807         /*
808          * path add/remove event
809          */
810         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
811                            uev->kernel) > 0)
812                 goto out;
813
814         if (!strncmp(uev->action, "add", 3)) {
815                 r = uev_add_path(uev, vecs);
816                 goto out;
817         }
818         if (!strncmp(uev->action, "remove", 6)) {
819                 r = uev_remove_path(uev, vecs);
820                 goto out;
821         }
822         if (!strncmp(uev->action, "change", 6)) {
823                 r = uev_update_path(uev, vecs);
824                 goto out;
825         }
826
827 out:
828         lock_cleanup_pop(vecs->lock);
829         return r;
830 }
831
832 static void *
833 ueventloop (void * ap)
834 {
835         block_signal(SIGUSR1, NULL);
836         block_signal(SIGHUP, NULL);
837
838         if (uevent_listen())
839                 condlog(0, "error starting uevent listener");
840
841         return NULL;
842 }
843
844 static void *
845 uevqloop (void * ap)
846 {
847         block_signal(SIGUSR1, NULL);
848         block_signal(SIGHUP, NULL);
849
850         if (uevent_dispatch(&uev_trigger, ap))
851                 condlog(0, "error starting uevent dispatcher");
852
853         return NULL;
854 }
855 static void *
856 uxlsnrloop (void * ap)
857 {
858         block_signal(SIGUSR1, NULL);
859         block_signal(SIGHUP, NULL);
860
861         if (cli_init())
862                 return NULL;
863
864         set_handler_callback(LIST+PATHS, cli_list_paths);
865         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
866         set_handler_callback(LIST+MAPS, cli_list_maps);
867         set_handler_callback(LIST+STATUS, cli_list_status);
868         set_handler_callback(LIST+DAEMON, cli_list_daemon);
869         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
870         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
871         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
872         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
873         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
874         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
875         set_handler_callback(LIST+CONFIG, cli_list_config);
876         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
877         set_handler_callback(LIST+DEVICES, cli_list_devices);
878         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
879         set_handler_callback(ADD+PATH, cli_add_path);
880         set_handler_callback(DEL+PATH, cli_del_path);
881         set_handler_callback(ADD+MAP, cli_add_map);
882         set_handler_callback(DEL+MAP, cli_del_map);
883         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
884         set_handler_callback(RECONFIGURE, cli_reconfigure);
885         set_handler_callback(SUSPEND+MAP, cli_suspend);
886         set_handler_callback(RESUME+MAP, cli_resume);
887         set_handler_callback(RESIZE+MAP, cli_resize);
888         set_handler_callback(RELOAD+MAP, cli_reload);
889         set_handler_callback(RESET+MAP, cli_reassign);
890         set_handler_callback(REINSTATE+PATH, cli_reinstate);
891         set_handler_callback(FAIL+PATH, cli_fail);
892         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
893         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
894         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
895         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
896         set_handler_callback(QUIT, cli_quit);
897         set_handler_callback(SHUTDOWN, cli_shutdown);
898         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
899         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
900         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
901         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
902         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
903
904         umask(077);
905         uxsock_listen(&uxsock_trigger, ap);
906
907         return NULL;
908 }
909
910 int
911 exit_daemon (int status)
912 {
913         if (status != 0)
914                 fprintf(stderr, "bad exit status. see daemon.log\n");
915
916         if (running_state != DAEMON_SHUTDOWN) {
917                 pthread_mutex_lock(&exit_mutex);
918                 pthread_cond_signal(&exit_cond);
919                 pthread_mutex_unlock(&exit_mutex);
920         }
921         return status;
922 }
923
924 const char *
925 daemon_status(void)
926 {
927         switch (running_state) {
928         case DAEMON_INIT:
929                 return "init";
930         case DAEMON_START:
931                 return "startup";
932         case DAEMON_CONFIGURE:
933                 return "configure";
934         case DAEMON_RUNNING:
935                 return "running";
936         case DAEMON_SHUTDOWN:
937                 return "shutdown";
938         }
939         return NULL;
940 }
941
942 static void
943 fail_path (struct path * pp, int del_active)
944 {
945         if (!pp->mpp)
946                 return;
947
948         condlog(2, "checker failed path %s in map %s",
949                  pp->dev_t, pp->mpp->alias);
950
951         dm_fail_path(pp->mpp->alias, pp->dev_t);
952         if (del_active)
953                 update_queue_mode_del_path(pp->mpp);
954 }
955
956 /*
957  * caller must have locked the path list before calling that function
958  */
959 static void
960 reinstate_path (struct path * pp, int add_active)
961 {
962         if (!pp->mpp)
963                 return;
964
965         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
966                 condlog(0, "%s: reinstate failed", pp->dev_t);
967         else {
968                 condlog(2, "%s: reinstated", pp->dev_t);
969                 if (add_active)
970                         update_queue_mode_add_path(pp->mpp);
971         }
972 }
973
974 static void
975 enable_group(struct path * pp)
976 {
977         struct pathgroup * pgp;
978
979         /*
980          * if path is added through uev_add_path, pgindex can be unset.
981          * the next update_multipath_strings() call will set it, upon map reload.
982          *
983          * we can safely return here, because upon map reload, all
984          * PG will be enabled.
985          */
986         if (!pp->mpp->pg || !pp->pgindex)
987                 return;
988
989         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
990
991         if (pgp->status == PGSTATE_DISABLED) {
992                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
993                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
994         }
995 }
996
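/* Drop maps from mpvec whose device-mapper device no longer exists. */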
997 static void
998 mpvec_garbage_collector (struct vectors * vecs)
999 {
1000         struct multipath * mpp;
1001         unsigned int i;
1002
1003         if (!vecs->mpvec)
1004                 return;
1005
1006         vector_foreach_slot (vecs->mpvec, mpp, i) {
1007                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1008                         condlog(2, "%s: remove dead map", mpp->alias);
1009                         remove_map_and_stop_waiter(mpp, vecs, 1);
1010                         i--;
1011                 }
1012         }
1013 }
1014
1015 /* This is called after a path has started working again. If the multipath
1016  * device for this path uses the followover failback type, and this is the
1017  * best pathgroup, and this is the first path in the pathgroup to come back
1018  * up, then switch to this pathgroup */
1019 static int
1020 followover_should_failback(struct path * pp)
1021 {
1022         struct pathgroup * pgp;
1023         struct path *pp1;
1024         int i;
1025
1026         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1027             !pp->mpp->pg || !pp->pgindex ||
1028             pp->pgindex != pp->mpp->bestpg)
1029                 return 0;
1030
1031         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1032         vector_foreach_slot(pgp->paths, pp1, i) {
1033                 if (pp1 == pp)
1034                         continue;
1035                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1036                         return 0;
1037         }
1038         return 1;
1039 }
1040
1041 static void
1042 defered_failback_tick (vector mpvec)
1043 {
1044         struct multipath * mpp;
1045         unsigned int i;
1046
1047         vector_foreach_slot (mpvec, mpp, i) {
1048                 /*
1049                  * deferred failback getting sooner
1050                  */
1051                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1052                         mpp->failback_tick--;
1053
1054                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1055                                 switch_pathgroup(mpp);
1056                 }
1057         }
1058 }
1059
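/*
 * Count down no_path_retry for maps that are queueing with no usable path;
 * when retry_tick reaches zero, queue_if_no_path is switched off so that
 * pending I/O fails instead of hanging indefinitely.
 */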
1060 static void
1061 retry_count_tick(vector mpvec)
1062 {
1063         struct multipath *mpp;
1064         unsigned int i;
1065
1066         vector_foreach_slot (mpvec, mpp, i) {
1067                 if (mpp->retry_tick) {
1068                         mpp->stat_total_queueing_time++;
1069                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1070                         if(--mpp->retry_tick == 0) {
1071                                 dm_queue_if_no_path(mpp->alias, 0);
1072                                 condlog(2, "%s: Disable queueing", mpp->alias);
1073                         }
1074                 }
1075         }
1076 }
1077
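/*
 * Refresh path priorities (just this path, or every path of the map when
 * refresh_all is set) and return 1 if any priority changed.
 */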
1078 int update_prio(struct path *pp, int refresh_all)
1079 {
1080         int oldpriority;
1081         struct path *pp1;
1082         struct pathgroup * pgp;
1083         int i, j, changed = 0;
1084
1085         if (refresh_all) {
1086                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1087                         vector_foreach_slot (pgp->paths, pp1, j) {
1088                                 oldpriority = pp1->priority;
1089                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1090                                 if (pp1->priority != oldpriority)
1091                                         changed = 1;
1092                         }
1093                 }
1094                 return changed;
1095         }
1096         oldpriority = pp->priority;
1097         pathinfo(pp, conf->hwtable, DI_PRIO);
1098
1099         if (pp->priority == oldpriority)
1100                 return 0;
1101         return 1;
1102 }
1103
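/*
 * Reload the map so regrouped or reprioritized paths take effect, then
 * resync our state from the kernel.
 */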
1104 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1105 {
1106         if (reload_map(vecs, mpp, refresh))
1107                 return 1;
1108
1109         dm_lib_release();
1110         if (setup_multipath(vecs, mpp) != 0)
1111                 return 1;
1112         sync_map_state(mpp);
1113
1114         return 0;
1115 }
1116
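/*
 * Heart of the checker loop: run the path checker, synchronize with kernel
 * state, fail or reinstate the path in device-mapper on state changes,
 * adapt the polling interval, and trigger failback or path group switches.
 */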
1117 void
1118 check_path (struct vectors * vecs, struct path * pp)
1119 {
1120         int newstate;
1121         int new_path_up = 0;
1122         int chkr_new_path_up = 0;
1123         int oldchkrstate = pp->chkrstate;
1124
1125         if (!pp->mpp)
1126                 return;
1127
1128         if (pp->tick && --pp->tick)
1129                 return; /* don't check this path yet */
1130
1131         /*
1132          * provision a next check soonest,
1133          * in case we exit abnormally from here
1134          */
1135         pp->tick = conf->checkint;
1136
1137         newstate = path_offline(pp);
1138         if (newstate == PATH_UP)
1139                 newstate = get_state(pp, 1);
1140
1141         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1142                 condlog(2, "%s: unusable path", pp->dev);
1143                 pathinfo(pp, conf->hwtable, 0);
1144                 return;
1145         }
1146         /*
1147          * Async IO in flight. Keep the previous path state
1148          * and reschedule as soon as possible
1149          */
1150         if (newstate == PATH_PENDING) {
1151                 pp->tick = 1;
1152                 return;
1153         }
1154         /*
1155          * Synchronize with kernel state
1156          */
1157         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1158                 condlog(1, "%s: Could not synchronize with kernel state",
1159                         pp->dev);
1160                 pp->dmstate = PSTATE_UNDEF;
1161         }
1162         pp->chkrstate = newstate;
1163         if (newstate != pp->state) {
1164                 int oldstate = pp->state;
1165                 pp->state = newstate;
1166                 LOG_MSG(1, checker_message(&pp->checker));
1167
1168                 /*
1169                  * upon state change, reset the checkint
1170                  * to the shortest delay
1171                  */
1172                 pp->checkint = conf->checkint;
1173
1174                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1175                         /*
1176                          * proactively fail path in the DM
1177                          */
1178                         if (oldstate == PATH_UP ||
1179                             oldstate == PATH_GHOST)
1180                                 fail_path(pp, 1);
1181                         else
1182                                 fail_path(pp, 0);
1183
1184                         /*
1185                          * cancel scheduled failback
1186                          */
1187                         pp->mpp->failback_tick = 0;
1188
1189                         pp->mpp->stat_path_failures++;
1190                         return;
1191                 }
1192
1193                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1194                         if (pp->mpp && pp->mpp->prflag) {
1195                                 /*
1196                                  * Check Persistent Reservation.
1197                                  */
1198                                 condlog(2, "%s: checking persistent reservation "
1199                                         "registration", pp->dev);
1200                                 mpath_pr_event_handle(pp);
1201                         }
1202                 }
1203
1204                 /*
1205                  * reinstate this path
1206                  */
1207                 if (oldstate != PATH_UP &&
1208                     oldstate != PATH_GHOST)
1209                         reinstate_path(pp, 1);
1210                 else
1211                         reinstate_path(pp, 0);
1212
1213                 new_path_up = 1;
1214
1215                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1216                         chkr_new_path_up = 1;
1217
1218                 /*
1219                  * if at least one path is up in a group, and
1220                  * the group is disabled, re-enable it
1221                  */
1222                 if (newstate == PATH_UP)
1223                         enable_group(pp);
1224         }
1225         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1226                 if (pp->dmstate == PSTATE_FAILED ||
1227                     pp->dmstate == PSTATE_UNDEF) {
1228                         /* Clear IO errors */
1229                         reinstate_path(pp, 0);
1230                 } else {
1231                         LOG_MSG(4, checker_message(&pp->checker));
1232                         if (pp->checkint != conf->max_checkint) {
1233                                 /*
1234                                  * double the next check delay.
1235                                  * max at conf->max_checkint
1236                                  */
1237                                 if (pp->checkint < (conf->max_checkint / 2))
1238                                         pp->checkint = 2 * pp->checkint;
1239                                 else
1240                                         pp->checkint = conf->max_checkint;
1241
1242                                 condlog(4, "%s: delay next check %is",
1243                                         pp->dev_t, pp->checkint);
1244                         }
1245                         pp->tick = pp->checkint;
1246                 }
1247         }
1248         else if (newstate == PATH_DOWN) {
1249                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1250                         LOG_MSG(3, checker_message(&pp->checker));
1251                 else
1252                         LOG_MSG(2, checker_message(&pp->checker));
1253         }
1254
1255         pp->state = newstate;
1256
1257         /*
1258          * path prio refreshing
1259          */
1260         condlog(4, "path prio refresh");
1261
1262         if (update_prio(pp, new_path_up) &&
1263             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1264              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1265                 update_path_groups(pp->mpp, vecs, !new_path_up);
1266         else if (need_switch_pathgroup(pp->mpp, 0)) {
1267                 if (pp->mpp->pgfailback > 0 &&
1268                     (new_path_up || pp->mpp->failback_tick <= 0))
1269                         pp->mpp->failback_tick =
1270                                 pp->mpp->pgfailback + 1;
1271                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1272                          (chkr_new_path_up && followover_should_failback(pp)))
1273                         switch_pathgroup(pp->mpp);
1274         }
1275 }
1276
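/*
 * Checker thread: once per second, check due paths, run deferred-failback
 * and no_path_retry countdowns, and periodically garbage-collect dead maps.
 */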
1277 static void *
1278 checkerloop (void *ap)
1279 {
1280         struct vectors *vecs;
1281         struct path *pp;
1282         int count = 0;
1283         unsigned int i;
1284         sigset_t old;
1285
1286         mlockall(MCL_CURRENT | MCL_FUTURE);
1287         vecs = (struct vectors *)ap;
1288         condlog(2, "path checkers start up");
1289
1290         /*
1291          * init the path check interval
1292          */
1293         vector_foreach_slot (vecs->pathvec, pp, i) {
1294                 pp->checkint = conf->checkint;
1295         }
1296
1297         while (1) {
1298                 block_signal(SIGHUP, &old);
1299                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1300                 lock(vecs->lock);
1301                 pthread_testcancel();
1302                 condlog(4, "tick");
1303
1304                 if (vecs->pathvec) {
1305                         vector_foreach_slot (vecs->pathvec, pp, i) {
1306                                 check_path(vecs, pp);
1307                         }
1308                 }
1309                 if (vecs->mpvec) {
1310                         defered_failback_tick(vecs->mpvec);
1311                         retry_count_tick(vecs->mpvec);
1312                 }
1313                 if (count)
1314                         count--;
1315                 else {
1316                         condlog(4, "map garbage collection");
1317                         mpvec_garbage_collector(vecs);
1318                         count = MAPGCINT;
1319                 }
1320
1321                 lock_cleanup_pop(vecs->lock);
1322                 pthread_sigmask(SIG_SETMASK, &old, NULL);
1323                 sleep(1);
1324         }
1325         return NULL;
1326 }
1327
1328 int
1329 configure (struct vectors * vecs, int start_waiters)
1330 {
1331         struct multipath * mpp;
1332         struct path * pp;
1333         vector mpvec;
1334         int i;
1335
1336         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1337                 return 1;
1338
1339         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1340                 return 1;
1341
1342         if (!(mpvec = vector_alloc()))
1343                 return 1;
1344
1345         /*
1346          * probe for current path (from sysfs) and map (from dm) sets
1347          */
1348         path_discovery(vecs->pathvec, conf, DI_ALL);
1349
1350         vector_foreach_slot (vecs->pathvec, pp, i){
1351                 if (filter_path(conf, pp) > 0){
1352                         vector_del_slot(vecs->pathvec, i);
1353                         free_path(pp);
1354                         i--;
1355                 }
1356                 else
1357                         pp->checkint = conf->checkint;
1358         }
1359         if (map_discovery(vecs))
1360                 return 1;
1361
1362         /*
1363          * create new set of maps & push changed ones into dm
1364          */
1365         if (coalesce_paths(vecs, mpvec, NULL, 1))
1366                 return 1;
1367
1368         /*
1369          * may need to remove some maps which are no longer relevant
1370          * e.g., due to blacklist changes in conf file
1371          */
1372         if (coalesce_maps(vecs, mpvec))
1373                 return 1;
1374
1375         dm_lib_release();
1376
1377         sync_maps_state(mpvec);
1378         vector_foreach_slot(mpvec, mpp, i){
1379                 remember_wwid(mpp->wwid);
1380                 update_map_pr(mpp);
1381         }
1382
1383         /*
1384          * purge dm of old maps
1385          */
1386         remove_maps(vecs);
1387
1388         /*
1389          * save new set of maps formed by considering current path state
1390          */
1391         vector_free(vecs->mpvec);
1392         vecs->mpvec = mpvec;
1393
1394         /*
1395          * start dm event waiter threads for these new maps
1396          */
1397         vector_foreach_slot(vecs->mpvec, mpp, i) {
1398                 if (setup_multipath(vecs, mpp))
1399                         return 1;
1400                 if (start_waiters)
1401                         if (start_waiter_thread(mpp, vecs))
1402                                 return 1;
1403         }
1404         return 0;
1405 }
1406
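/*
 * Reconfigure (SIGHUP or CLI): drop all current maps and paths, reload
 * DEFAULT_CONFIGFILE and rebuild everything via configure().
 */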
1407 int
1408 reconfigure (struct vectors * vecs)
1409 {
1410         struct config * old = conf;
1411         int retval = 1;
1412
1413         /*
1414          * free old map and path vectors ... they use old conf state
1415          */
1416         if (VECTOR_SIZE(vecs->mpvec))
1417                 remove_maps_and_stop_waiters(vecs);
1418
1419         if (VECTOR_SIZE(vecs->pathvec))
1420                 free_pathvec(vecs->pathvec, FREE_PATHS);
1421
1422         vecs->pathvec = NULL;
1423         conf = NULL;
1424
1425         if (!load_config(DEFAULT_CONFIGFILE)) {
1426                 conf->verbosity = old->verbosity;
1427                 conf->daemon = 1;
1428                 configure(vecs, 1);
1429                 free_config(old);
1430                 retval = 0;
1431         }
1432
1433         return retval;
1434 }
1435
1436 static struct vectors *
1437 init_vecs (void)
1438 {
1439         struct vectors * vecs;
1440
1441         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1442
1443         if (!vecs)
1444                 return NULL;
1445
1446         vecs->lock.mutex =
1447                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1448
1449         if (!vecs->lock.mutex)
1450                 goto out;
1451
1452         pthread_mutex_init(vecs->lock.mutex, NULL);
1453         vecs->lock.depth = 0;
1454
1455         return vecs;
1456
1457 out:
1458         FREE(vecs);
1459         condlog(0, "failed to init paths");
1460         return NULL;
1461 }
1462
1463 static void *
1464 signal_set(int signo, void (*func) (int))
1465 {
1466         int r;
1467         struct sigaction sig;
1468         struct sigaction osig;
1469
1470         sig.sa_handler = func;
1471         sigemptyset(&sig.sa_mask);
1472         sig.sa_flags = 0;
1473
1474         r = sigaction(signo, &sig, &osig);
1475
1476         if (r < 0)
1477                 return (SIG_ERR);
1478         else
1479                 return (osig.sa_handler);
1480 }
1481
1482 static void
1483 sighup (int sig)
1484 {
1485         condlog(2, "reconfigure (SIGHUP)");
1486
1487         if (running_state != DAEMON_RUNNING)
1488                 return;
1489
1490         reconfigure(gvecs);
1491
1492 #ifdef _DEBUG_
1493         dbg_free_final(NULL);
1494 #endif
1495 }
1496
1497 static void
1498 sigend (int sig)
1499 {
1500         exit_daemon(0);
1501 }
1502
1503 static void
1504 sigusr1 (int sig)
1505 {
1506         condlog(3, "SIGUSR1 received");
1507 }
1508
1509 static void
1510 signal_init(void)
1511 {
1512         signal_set(SIGHUP, sighup);
1513         signal_set(SIGUSR1, sigusr1);
1514         signal_set(SIGINT, sigend);
1515         signal_set(SIGTERM, sigend);
1516         signal(SIGPIPE, SIG_IGN);
1517 }
1518
1519 static void
1520 setscheduler (void)
1521 {
1522         int res;
1523         static struct sched_param sched_param = {
1524                 .sched_priority = 99
1525         };
1526
1527         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1528
1529         if (res == -1)
1530                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1531         return;
1532 }
1533
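/*
 * Shield multipathd from the OOM killer, preferring oom_score_adj and
 * falling back to the legacy oom_adj interface.
 */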
1534 static void
1535 set_oom_adj (void)
1536 {
1537 #ifdef OOM_SCORE_ADJ_MIN
1538         int retry = 1;
1539         char *file = "/proc/self/oom_score_adj";
1540         int score = OOM_SCORE_ADJ_MIN;
1541 #else
1542         int retry = 0;
1543         char *file = "/proc/self/oom_adj";
1544         int score = OOM_ADJUST_MIN;
1545 #endif
1546         FILE *fp;
1547         struct stat st;
1548
1549         do {
1550                 if (stat(file, &st) == 0){
1551                         fp = fopen(file, "w");
1552                         if (!fp) {
1553                                 condlog(0, "couldn't fopen %s : %s", file,
1554                                         strerror(errno));
1555                                 return;
1556                         }
1557                         fprintf(fp, "%i", score);
1558                         fclose(fp);
1559                         return;
1560                 }
1561                 if (errno != ENOENT) {
1562                         condlog(0, "couldn't stat %s : %s", file,
1563                                 strerror(errno));
1564                         return;
1565                 }
1566 #ifdef OOM_ADJUST_MIN
1567                 file = "/proc/self/oom_adj";
1568                 score = OOM_ADJUST_MIN;
1569 #else
1570                 retry = 0;
1571 #endif
1572         } while (retry--);
1573         condlog(0, "couldn't adjust oom score");
1574 }
1575
1576 static int
1577 child (void * param)
1578 {
1579         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1580         pthread_attr_t log_attr, misc_attr;
1581         struct vectors * vecs;
1582         struct multipath * mpp;
1583         int i;
1584         sigset_t set;
1585         int rc, pid_rc;
1586
1587         mlockall(MCL_CURRENT | MCL_FUTURE);
1588
1589         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1590         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1591
1592         if (logsink) {
1593                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1594                 log_thread_start(&log_attr);
1595                 pthread_attr_destroy(&log_attr);
1596         }
1597
1598         running_state = DAEMON_START;
1599
1600         condlog(2, "--------start up--------");
1601         condlog(2, "read " DEFAULT_CONFIGFILE);
1602
1603         if (load_config(DEFAULT_CONFIGFILE))
1604                 exit(1);
1605
1606         if (init_checkers()) {
1607                 condlog(0, "failed to initialize checkers");
1608                 exit(1);
1609         }
1610         if (init_prio()) {
1611                 condlog(0, "failed to initialize prioritizers");
1612                 exit(1);
1613         }
1614
1615         setlogmask(LOG_UPTO(conf->verbosity + 3));
1616
1617         if (conf->max_fds) {
1618                 struct rlimit fd_limit;
1619
1620                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1621                         condlog(0, "can't get open fds limit: %s",
1622                                 strerror(errno));
1623                         fd_limit.rlim_cur = 0;
1624                         fd_limit.rlim_max = 0;
1625                 }
1626                 if (fd_limit.rlim_cur < conf->max_fds) {
1627                         fd_limit.rlim_cur = conf->max_fds;
1628                         if (fd_limit.rlim_max < conf->max_fds)
1629                                 fd_limit.rlim_max = conf->max_fds;
1630                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1631                                 condlog(0, "can't set open fds limit to "
1632                                         "%lu/%lu : %s",
1633                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1634                                         strerror(errno));
1635                         } else {
1636                                 condlog(3, "set open fds limit to %lu/%lu",
1637                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1638                         }
1639                 }
1640
1641         }
1642
1643         vecs = gvecs = init_vecs();
1644         if (!vecs)
1645                 exit(1);
1646
1647         signal_init();
1648         setscheduler();
1649         set_oom_adj();
1650
1651         conf->daemon = 1;
1652         udev_set_sync_support(0);
1653         /*
1654          * Start uevent listener early to catch events
1655          */
1656         if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs))) {
1657                 condlog(0, "failed to create uevent thread: %d", rc);
1658                 exit(1);
1659         }
1660         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1661                 condlog(0, "failed to create cli listener: %d", rc);
1662                 exit(1);
1663         }
1664         /*
1665          * fetch and configure both paths and multipaths
1666          */
1667         running_state = DAEMON_CONFIGURE;
1668
1669         lock(vecs->lock);
1670         if (configure(vecs, 1)) {
1671                 unlock(vecs->lock);
1672                 condlog(0, "failure during configuration");
1673                 exit(1);
1674         }
1675         unlock(vecs->lock);
1676
1677         /*
1678          * start threads
1679          */
1680         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1681                 condlog(0,"failed to create checker loop thread: %d", rc);
1682                 exit(1);
1683         }
1684         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1685                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1686                 exit(1);
1687         }
1688         pthread_attr_destroy(&misc_attr);
1689
1690         pthread_mutex_lock(&exit_mutex);
1691         /* Startup complete, create pidfile */
1692         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1693         /* Ignore errors, we can live without */
1694
1695         running_state = DAEMON_RUNNING;
1696         pthread_cond_wait(&exit_cond, &exit_mutex);
1697         /* Need to block these to avoid deadlocking */
1698         sigemptyset(&set);
1699         sigaddset(&set, SIGTERM);
1700         sigaddset(&set, SIGINT);
1701         pthread_sigmask(SIG_BLOCK, &set, NULL);
1702
1703         /*
1704          * exit path
1705          */
1706         running_state = DAEMON_SHUTDOWN;
1707         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1708         block_signal(SIGHUP, NULL);
1709         lock(vecs->lock);
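             /*
              * Unless queue_without_daemon is enabled, clear queue_if_no_path
              * on every map so that outstanding I/O does not hang forever
              * once the daemon is gone.
              */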
1710         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1711                 vector_foreach_slot(vecs->mpvec, mpp, i)
1712                         dm_queue_if_no_path(mpp->alias, 0);
1713         remove_maps_and_stop_waiters(vecs);
1714         unlock(vecs->lock);
1715
1716         pthread_cancel(check_thr);
1717         pthread_cancel(uevent_thr);
1718         pthread_cancel(uxlsnr_thr);
1719         pthread_cancel(uevq_thr);
1720
1721         lock(vecs->lock);
1722         free_pathvec(vecs->pathvec, FREE_PATHS);
1723         vecs->pathvec = NULL;
1724         unlock(vecs->lock);
1725         /* Now all the waitevent threads will start rushing in. */
1726         while (vecs->lock.depth > 0) {
1727                 sleep (1); /* This is weak. */
1728                 condlog(3, "Have %d waitevent checker threads to de-alloc,"
1729                         " waiting...", vecs->lock.depth);
1730         }
1731         pthread_mutex_destroy(vecs->lock.mutex);
1732         FREE(vecs->lock.mutex);
1733         vecs->lock.depth = 0;
1734         vecs->lock.mutex = NULL;
1735         FREE(vecs);
1736         vecs = NULL;
1737
1738         cleanup_checkers();
1739         cleanup_prio();
1740
1741         dm_lib_release();
1742         dm_lib_exit();
1743
1744         /* We're done here */
1745         if (!pid_rc) {
1746                 condlog(3, "unlink pidfile");
1747                 unlink(DEFAULT_PIDFILE);
1748         }
1749
1750         condlog(2, "--------shut down-------");
1751
1752         if (logsink)
1753                 log_thread_stop();
1754
1755         /*
1756          * Freeing config must be done after condlog() and dm_lib_exit(),
1757          * because logging functions like dlog() and dm_write_log()
1758          * reference the config.
1759          */
1760         free_config(conf);
1761         conf = NULL;
1762
1763 #ifdef _DEBUG_
1764         dbg_free_final(NULL);
1765 #endif
1766
1767         exit(0);
1768 }
1769
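     /*
      * Classic double-fork daemonization: detach from the controlling
      * terminal with setsid(), chdir to "/" and redirect stdin, stdout and
      * stderr to /dev/null.  Returns 0 in the daemon process, the first
      * child's pid in the original parent, and -1 if the first fork fails.
      */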
1770 static int
1771 daemonize(void)
1772 {
1773         int pid;
1774         int dev_null_fd;
1775
1776         if( (pid = fork()) < 0){
1777                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1778                 return -1;
1779         }
1780         else if (pid != 0)
1781                 return pid;
1782
1783         setsid();
1784
1785         if ( (pid = fork()) < 0)
1786                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1787         else if (pid != 0)
1788                 _exit(0);
1789
1790         if (chdir("/") < 0)
1791                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1792
1793         dev_null_fd = open("/dev/null", O_RDWR);
1794         if (dev_null_fd < 0){
1795                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1796                         strerror(errno));
1797                 _exit(0);
1798         }
1799
1800         close(STDIN_FILENO);
1801         if (dup(dev_null_fd) < 0) {
1802                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1803                         strerror(errno));
1804                 _exit(0);
1805         }
1806         close(STDOUT_FILENO);
1807         if (dup(dev_null_fd) < 0) {
1808                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1809                         strerror(errno));
1810                 _exit(0);
1811         }
1812         close(STDERR_FILENO);
1813         if (dup(dev_null_fd) < 0) {
1814                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1815                         strerror(errno));
1816                 _exit(0);
1817         }
1818         close(dev_null_fd);
1819         daemon_pid = getpid();
1820         return 0;
1821 }
1822
1823 int
1824 main (int argc, char *argv[])
1825 {
1826         extern char *optarg;
1827         extern int optind;
1828         int arg;
1829         int err;
1830
1831         logsink = 1;
1832         running_state = DAEMON_INIT;
1833         dm_init();
1834
1835         if (getuid() != 0) {
1836                 fprintf(stderr, "need to be root\n");
1837                 exit(1);
1838         }
1839
1840         /* make sure we don't lock any path */
1841         if (chdir("/") < 0)
1842                 fprintf(stderr, "can't chdir to root directory : %s\n",
1843                         strerror(errno));
1844         umask(umask(077) | 022);
1845
1846         conf = alloc_config();
1847
1848         if (!conf)
1849                 exit(1);
1850
1851         while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1852         switch(arg) {
1853                 case 'd':
1854                         logsink = 0;
1855                         //debug=1; /* ### comment me out ### */
1856                         break;
1857                 case 'v':
1858                         /* verbosity argument must be numeric */
1859                         if (!isdigit(optarg[0]))
1860                                 exit(1);
1861
1862                         conf->verbosity = atoi(optarg);
1863                         break;
1864                 case 'k':
1865                         uxclnt(optarg);
1866                         exit(0);
1867                 default:
1868                         ;
1869                 }
1870         }
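             /*
              * Remaining arguments are joined into one command string and
              * passed to a running daemon through uxclnt(), just like an
              * interactive "-k" session.
              */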
1871         if (optind < argc) {
1872                 char cmd[CMDSIZE];
1873                 char * s = cmd;
1874                 char * c = s;
1875
1876                 while (optind < argc) {
1877                         if (strchr(argv[optind], ' '))
1878                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1879                         else
1880                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1881                         optind++;
1882                 }
1883                 c += snprintf(c, s + CMDSIZE - c, "\n");
1884                 uxclnt(s);
1885                 exit(0);
1886         }
1887
1888         if (!logsink)
1889                 err = 0;
1890         else
1891                 err = daemonize();
1892
1893         if (err < 0)
1894                 /* error */
1895                 exit(1);
1896         else if (err > 0)
1897                 /* parent dies */
1898                 exit(0);
1899         else
1900                 /* child lives */
1901                 return (child(NULL));
1902 }
1903
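     /*
      * Persistent reservation handler for a path, run in its own thread:
      * read the registered keys (PR IN, READ KEYS), verify that the map's
      * reservation_key is among them, then re-register that key on this
      * path with REGISTER AND IGNORE EXISTING KEY (PR OUT).
      */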
1904 void * mpath_pr_event_handler_fn (void * pathp)
1905 {
1906         struct multipath * mpp;
1907         int i,j, ret, isFound;
1908         struct path * pp = (struct path *)pathp;
1909         unsigned char *keyp;
1910         uint64_t prkey;
1911         struct prout_param_descriptor *param;
1912         struct prin_resp *resp;
1913
1914         mpp = pp->mpp;
1915
1916         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1917         if (!resp) {
1918                 condlog(0, "%s: Alloc failed for prin response", pp->dev);
1919                 return NULL;
1920         }
1921
1922         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1923         if (ret != MPATH_PR_SUCCESS)
1924         {
1925                 condlog(0, "%s: pr in read keys service action failed. Error=%d", pp->dev, ret);
1926                 goto out;
1927         }
1928
1929         condlog(3, "event pr=%d addlen=%d", resp->prin_descriptor.prin_readkeys.prgeneration,
1930                         resp->prin_descriptor.prin_readkeys.additional_length);
1931
1932         if (resp->prin_descriptor.prin_readkeys.additional_length == 0)
1933         {
1934                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1935                 ret = MPATH_PR_SUCCESS;
1936                 goto out;
1937         }
1938         prkey = 0;
1939         keyp = (unsigned char *)mpp->reservation_key;
1940         for (j = 0; j < 8; ++j) {
1941                 if (j > 0)
1942                         prkey <<= 8;
1943                 prkey |= *keyp;
1944                 ++keyp;
1945         }
1946         condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);
1947
1948         isFound = 0;
1949         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length / 8; i++)
1950         {
1951                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
1952                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8, -1);
1953                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i * 8], 8))
1954                 {
1955                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1956                         isFound = 1;
1957                         break;
1958                 }
1959         }
1960         if (!isFound)
1961         {
1962                 condlog(0, "%s: Either device not registered or ", pp->dev);
1963                 condlog(0, "host is not authorised for registration. Skip path");
1964                 ret = MPATH_PR_OTHER;
1965                 goto out;
1966         }
1967
1968         param = malloc(sizeof(struct prout_param_descriptor));
             if (!param)     /* bail out rather than dereference NULL */
                     goto out;
1969         memset(param, 0, sizeof(struct prout_param_descriptor));
1970
1971         for (j = 7; j >= 0; --j) {
1972                 param->sa_key[j] = (prkey & 0xff);
1973                 prkey >>= 8;
1974         }
1975         param->num_transportid = 0;
1976
1977         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1978
1979         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1980         if (ret != MPATH_PR_SUCCESS)
1981         {
1982                 condlog(0, "%s: Reservation registration failed. Error: %d", pp->dev, ret);
1983         }
1984         mpp->prflag = 1;
1985
1986         free(param);
1987 out:
1988         free(resp);
1989         return NULL;
1990 }
1991
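     /*
      * Run mpath_pr_event_handler_fn for this path in a separate thread and
      * wait for it to complete; returns -1 immediately if the map has no
      * reservation_key configured.
      */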
1992 int mpath_pr_event_handle(struct path *pp)
1993 {
1994         pthread_t thread;
1995         int rc;
1996         pthread_attr_t attr;
1997         struct multipath * mpp;
1998
1999         mpp = pp->mpp;
2000
2001         if (!mpp->reservation_key)
2002                 return -1;
2003
2004         pthread_attr_init(&attr);
2005         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2006
2007         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2008         if (rc) {
2009                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2010                 return -1;
2011         }
2012         pthread_attr_destroy(&attr);
2013         rc = pthread_join(thread, NULL);
2014         return 0;
2015 }
2016