multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <mpath_persist.h>
21
22 /*
23  * libcheckers
24  */
25 #include <checkers.h>
26
27 /*
28  * libmultipath
29  */
30 #include <parser.h>
31 #include <vector.h>
32 #include <memory.h>
33 #include <config.h>
34 #include <util.h>
35 #include <hwtable.h>
36 #include <defaults.h>
37 #include <structs.h>
38 #include <blacklist.h>
39 #include <structs_vec.h>
40 #include <dmparser.h>
41 #include <devmapper.h>
42 #include <sysfs.h>
43 #include <dict.h>
44 #include <discovery.h>
45 #include <debug.h>
46 #include <propsel.h>
47 #include <uevent.h>
48 #include <switchgroup.h>
49 #include <print.h>
50 #include <configure.h>
51 #include <prio.h>
52 #include <pgpolicies.h>
53 #include <uevent.h>
54
55 #include "main.h"
56 #include "pidfile.h"
57 #include "uxlsnr.h"
58 #include "uxclnt.h"
59 #include "cli.h"
60 #include "cli_handlers.h"
61 #include "lock.h"
62 #include "waiter.h"
63
64 #define FILE_NAME_SIZE 256
65 #define CMDSIZE 160
66
67 #define LOG_MSG(a, b) \
68 do { \
69         if (pp->offline) \
70                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
71         else if (strlen(b)) \
72                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
73 } while(0)
74
75 struct mpath_event_param
76 {
77         char * devname;
78         struct multipath *mpp;
79 };
80
81 unsigned int mpath_mx_alloc_len;
82
83 pthread_cond_t exit_cond = PTHREAD_COND_INITIALIZER;
84 pthread_mutex_t exit_mutex = PTHREAD_MUTEX_INITIALIZER;
85
86 int logsink;
87 enum daemon_status running_state;
88 pid_t daemon_pid;
89
90 /*
91  * global copy of vecs for use in sig handlers
92  */
93 struct vectors * gvecs;
94
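/*
 * Check whether a path group switch is in order: optionally refresh all
 * path priorities, recompute the best path group and return 1 if it
 * differs from the path group currently in use.
 */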
95 static int
96 need_switch_pathgroup (struct multipath * mpp, int refresh)
97 {
98         struct pathgroup * pgp;
99         struct path * pp;
100         unsigned int i, j;
101
102         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
103                 return 0;
104
105         /*
106          * Refresh path priority values
107          */
108         if (refresh)
109                 vector_foreach_slot (mpp->pg, pgp, i)
110                         vector_foreach_slot (pgp->paths, pp, j)
111                                 pathinfo(pp, conf->hwtable, DI_PRIO);
112
113         mpp->bestpg = select_path_group(mpp);
114
115         if (mpp->bestpg != mpp->nextpg)
116                 return 1;
117
118         return 0;
119 }
120
121 static void
122 switch_pathgroup (struct multipath * mpp)
123 {
124         mpp->stat_switchgroup++;
125         dm_switchgroup(mpp->alias, mpp->bestpg);
126         condlog(2, "%s: switch to path group #%i",
127                  mpp->alias, mpp->bestpg);
128 }
129
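/*
 * Drop maps from the current map vector that are not part of the newly
 * coalesced set (nmpv). Maps that cannot be flushed, typically because
 * the device is still open, are carried over into nmpv instead.
 */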
130 static int
131 coalesce_maps(struct vectors *vecs, vector nmpv)
132 {
133         struct multipath * ompp;
134         vector ompv = vecs->mpvec;
135         unsigned int i;
136         int j;
137
138         vector_foreach_slot (ompv, ompp, i) {
139                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
140                         /*
141                          * remove all current maps not allowed by the
142                          * current configuration
143                          */
144                         if (dm_flush_map(ompp->alias)) {
145                                 condlog(0, "%s: unable to flush devmap",
146                                         ompp->alias);
147                                 /*
148                                  * may be just because the device is open
149                                  */
150                                 if (!vector_alloc_slot(nmpv))
151                                         return 1;
152
153                                 vector_set_slot(nmpv, ompp);
154                                 setup_multipath(vecs, ompp);
155
156                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
157                                         vector_del_slot(ompv, j);
158
159                                 continue;
160                         }
161                         else {
162                                 dm_lib_release();
163                                 condlog(2, "%s devmap removed", ompp->alias);
164                         }
165                 } else if (conf->reassign_maps) {
166                         condlog(3, "%s: Reassign existing device-mapper"
167                                 " devices", ompp->alias);
168                         dm_reassign(ompp->alias);
169                 }
170         }
171         return 0;
172 }
173
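/*
 * Propagate the checker view of each path into the kernel map:
 * reinstate paths that are up or ghost but failed in device-mapper,
 * and fail paths that are down or shaky but still active there.
 */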
174 void
175 sync_map_state(struct multipath *mpp)
176 {
177         struct pathgroup *pgp;
178         struct path *pp;
179         unsigned int i, j;
180
181         if (!mpp->pg)
182                 return;
183
184         vector_foreach_slot (mpp->pg, pgp, i){
185                 vector_foreach_slot (pgp->paths, pp, j){
186                         if (pp->state == PATH_UNCHECKED || 
187                             pp->state == PATH_WILD)
188                                 continue;
189                         if ((pp->dmstate == PSTATE_FAILED ||
190                              pp->dmstate == PSTATE_UNDEF) &&
191                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
192                                 dm_reinstate_path(mpp->alias, pp->dev_t);
193                         else if ((pp->dmstate == PSTATE_ACTIVE ||
194                                   pp->dmstate == PSTATE_UNDEF) &&
195                                  (pp->state == PATH_DOWN ||
196                                   pp->state == PATH_SHAKY))
197                                 dm_fail_path(mpp->alias, pp->dev_t);
198                 }
199         }
200 }
201
202 static void
203 sync_maps_state(vector mpvec)
204 {
205         unsigned int i;
206         struct multipath *mpp;
207
208         vector_foreach_slot (mpvec, mpp, i)
209                 sync_map_state(mpp);
210 }
211
212 static int
213 flush_map(struct multipath * mpp, struct vectors * vecs)
214 {
215         /*
216          * clear references to this map before flushing so we can ignore
217          * the spurious uevent we may generate with the dm_flush_map call below
218          */
219         if (dm_flush_map(mpp->alias)) {
220                 /*
221                  * May not really be an error -- if the map was already flushed
222                  * from the device mapper by dmsetup(8) for instance.
223                  */
224                 condlog(0, "%s: can't flush", mpp->alias);
225                 return 1;
226         }
227         else {
228                 dm_lib_release();
229                 condlog(2, "%s: devmap removed", mpp->alias);
230         }
231
232         orphan_paths(vecs->pathvec, mpp);
233         remove_map_and_stop_waiter(mpp, vecs, 1);
234
235         return 0;
236 }
237
238 static int
239 uev_add_map (struct uevent * uev, struct vectors * vecs)
240 {
241         char *alias;
242         int major = -1, minor = -1, rc;
243
244         condlog(3, "%s: add map (uevent)", uev->kernel);
245         alias = uevent_get_dm_name(uev);
246         if (!alias) {
247                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
248                 major = uevent_get_major(uev);
249                 minor = uevent_get_minor(uev);
250                 alias = dm_mapname(major, minor);
251                 if (!alias) {
252                         condlog(2, "%s: mapname not found for %d:%d",
253                                 uev->kernel, major, minor);
254                         return 1;
255                 }
256         }
257         rc = ev_add_map(uev->kernel, alias, vecs);
258         FREE(alias);
259         return rc;
260 }
261
262 int
263 ev_add_map (char * dev, char * alias, struct vectors * vecs)
264 {
265         char * refwwid;
266         struct multipath * mpp;
267         int map_present;
268         int r = 1;
269
270         map_present = dm_map_present(alias);
271
272         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
273                 condlog(4, "%s: not a multipath map", alias);
274                 return 0;
275         }
276
277         mpp = find_mp_by_alias(vecs->mpvec, alias);
278
279         if (mpp) {
280                 /*
281                  * Not really an error -- we generate our own uevent
282                  * if we create a multipath mapped device as a result
283                  * of uev_add_path
284                  */
285                 if (conf->reassign_maps) {
286                         condlog(3, "%s: Reassign existing device-mapper devices",
287                                 alias);
288                         dm_reassign(alias);
289                 }
290                 return 0;
291         }
292         condlog(2, "%s: adding map", alias);
293
294         /*
295          * now we can register the map
296          */
297         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
298                 sync_map_state(mpp);
299                 condlog(2, "%s: devmap %s registered", alias, dev);
300                 return 0;
301         }
302         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
303
304         if (refwwid) {
305                 r = coalesce_paths(vecs, NULL, refwwid, 0);
306                 dm_lib_release();
307         }
308
309         if (!r)
310                 condlog(2, "%s: devmap %s added", alias, dev);
311         else if (r == 2)
312                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
313         else
314                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
315
316         FREE(refwwid);
317         return r;
318 }
319
320 static int
321 uev_remove_map (struct uevent * uev, struct vectors * vecs)
322 {
323         char *alias;
324         int minor;
325         struct multipath *mpp;
326
327         condlog(2, "%s: remove map (uevent)", uev->kernel);
328         alias = uevent_get_dm_name(uev);
329         if (!alias) {
330                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
331                 return 0;
332         }
333         minor = uevent_get_minor(uev);
334         mpp = find_mp_by_minor(vecs->mpvec, minor);
335
336         if (!mpp) {
337                 condlog(2, "%s: devmap not registered, can't remove",
338                         uev->kernel);
339                 goto out;
340         }
341         if (strcmp(mpp->alias, alias)) {
342                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
343                         mpp->alias, mpp->dmi->minor, minor);
344                 goto out;
345         }
346
347         orphan_paths(vecs->pathvec, mpp);
348         remove_map_and_stop_waiter(mpp, vecs, 1);
349 out:
350         FREE(alias);
351         return 0;
352 }
353
354 int
355 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
356 {
357         struct multipath * mpp;
358
359         mpp = find_mp_by_minor(vecs->mpvec, minor);
360
361         if (!mpp) {
362                 condlog(2, "%s: devmap not registered, can't remove",
363                         devname);
364                 return 0;
365         }
366         if (strcmp(mpp->alias, alias)) {
367                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
368                         mpp->alias, mpp->dmi->minor, minor);
369                 return 0;
370         }
371         return flush_map(mpp, vecs);
372 }
373
374 static int
375 uev_add_path (struct uevent *uev, struct vectors * vecs)
376 {
377         struct path *pp;
378         int ret, i;
379
380         condlog(2, "%s: add path (uevent)", uev->kernel);
381         if (strstr(uev->kernel, "..") != NULL) {
382                 /*
383                  * Don't allow relative device names in the pathvec
384                  */
385                 condlog(0, "%s: path name is invalid", uev->kernel);
386                 return 1;
387         }
388
389         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
390         if (pp) {
391                 condlog(0, "%s: spurious uevent, path already in pathvec",
392                         uev->kernel);
393                 if (pp->mpp)
394                         return 0;
395                 if (!strlen(pp->wwid)) {
396                         udev_device_unref(pp->udev);
397                         pp->udev = udev_device_ref(uev->udev);
398                         ret = pathinfo(pp, conf->hwtable,
399                                        DI_ALL | DI_BLACKLIST);
400                         if (ret == 2) {
401                                 i = find_slot(vecs->pathvec, (void *)pp);
402                                 if (i != -1)
403                                         vector_del_slot(vecs->pathvec, i);
404                                 free_path(pp);
405                                 return 0;
406                         } else if (ret == 1) {
407                                 condlog(0, "%s: failed to reinitialize path",
408                                         uev->kernel);
409                                 return 1;
410                         }
411                 }
412         } else {
413                 /*
414                  * get path vital state
415                  */
416                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
417                                      uev->udev, DI_ALL, &pp);
418                 if (!pp) {
419                         if (ret == 2)
420                                 return 0;
421                         condlog(0, "%s: failed to store path info",
422                                 uev->kernel);
423                         return 1;
424                 }
425                 pp->checkint = conf->checkint;
426         }
427
428         return ev_add_path(pp, vecs);
429 }
430
431 /*
432  * returns:
433  * 0: added
434  * 1: error
435  */
436 int
437 ev_add_path (struct path * pp, struct vectors * vecs)
438 {
439         struct multipath * mpp;
440         char empty_buff[WWID_SIZE] = {0};
441         char params[PARAMS_SIZE] = {0};
442         int retries = 3;
443         int start_waiter = 0;
444
445         /*
446          * need path UID to go any further
447          */
448         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
449                 condlog(0, "%s: failed to get path uid", pp->dev);
450                 goto fail; /* leave path added to pathvec */
451         }
452         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
453 rescan:
454         if (mpp) {
455                 if ((!pp->size) || (mpp->size != pp->size)) {
456                         if (!pp->size)
457                                 condlog(0, "%s: failed to add new path %s, "
458                                         "device size is 0",
459                                         mpp->alias, pp->dev);
460                         else
461                                 condlog(0, "%s: failed to add new path %s, "
462                                         "device size mismatch",
463                                         mpp->alias, pp->dev);
464                         int i = find_slot(vecs->pathvec, (void *)pp);
465                         if (i != -1)
466                                 vector_del_slot(vecs->pathvec, i);
467                         free_path(pp);
468                         return 1;
469                 }
470
471                 condlog(4,"%s: adopting all paths for path %s",
472                         mpp->alias, pp->dev);
473                 if (adopt_paths(vecs->pathvec, mpp, 1))
474                         goto fail; /* leave path added to pathvec */
475
476                 verify_paths(mpp, vecs, NULL);
477                 mpp->flush_on_last_del = FLUSH_UNDEF;
478                 mpp->action = ACT_RELOAD;
479         }
480         else {
481                 if (!pp->size) {
482                         condlog(0, "%s: failed to create new map,"
483                                 " device size is 0 ", pp->dev);
484                         int i = find_slot(vecs->pathvec, (void *)pp);
485                         if (i != -1)
486                                 vector_del_slot(vecs->pathvec, i);
487                         free_path(pp);
488                         return 1;
489                 }
490
491                 condlog(4,"%s: creating new map", pp->dev);
492                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
493                         mpp->action = ACT_CREATE;
494                         /*
495                          * We don't depend on ACT_CREATE, as domap will
496                          * set it to ACT_NOTHING when complete.
497                          */
498                         start_waiter = 1;
499                 }
500                 else
501                         goto fail; /* leave path added to pathvec */
502         }
503
504         /* persistent reservation check */
505         mpath_pr_event_handle(pp);
506
507         /*
508          * push the map to the device-mapper
509          */
510         if (setup_map(mpp, params, PARAMS_SIZE)) {
511                 condlog(0, "%s: failed to setup map for addition of new "
512                         "path %s", mpp->alias, pp->dev);
513                 goto fail_map;
514         }
515         /*
516          * reload the map for the multipath mapped device
517          */
518         if (domap(mpp, params) <= 0) {
519                 condlog(0, "%s: failed in domap for addition of new "
520                         "path %s", mpp->alias, pp->dev);
521                 /*
522                  * deal with asynchronous uevents :((
523                  */
524                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
525                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
526                         sleep(1);
527                         update_mpp_paths(mpp, vecs->pathvec);
528                         goto rescan;
529                 }
530                 else if (mpp->action == ACT_RELOAD)
531                         condlog(0, "%s: giving up reload", mpp->alias);
532                 else
533                         goto fail_map;
534         }
535         dm_lib_release();
536
537         /*
538          * update our state from kernel regardless of create or reload
539          */
540         if (setup_multipath(vecs, mpp))
541                 goto fail; /* if setup_multipath fails, it removes the map */
542
543         sync_map_state(mpp);
544
545         if ((mpp->action == ACT_CREATE ||
546              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
547             start_waiter_thread(mpp, vecs))
548                         goto fail_map;
549
550         if (retries >= 0) {
551                 condlog(2, "%s [%s]: path added to devmap %s",
552                         pp->dev, pp->dev_t, mpp->alias);
553                 return 0;
554         }
555         else
556                 return 1;
557
558 fail_map:
559         remove_map(mpp, vecs, 1);
560 fail:
561         orphan_path(pp);
562         return 1;
563 }
564
565 static int
566 uev_remove_path (struct uevent *uev, struct vectors * vecs)
567 {
568         struct path *pp;
569
570         condlog(2, "%s: remove path (uevent)", uev->kernel);
571         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
572
573         if (!pp) {
574                 /* Not an error; path might have been purged earlier */
575                 condlog(0, "%s: path already removed", uev->kernel);
576                 return 0;
577         }
578
579         return ev_remove_path(pp, vecs);
580 }
581
582 int
583 ev_remove_path (struct path *pp, struct vectors * vecs)
584 {
585         struct multipath * mpp;
586         int i, retval = 0;
587         char params[PARAMS_SIZE] = {0};
588
589         /*
590          * avoid referring to the map of an orphaned path
591          */
592         if ((mpp = pp->mpp)) {
593                 /*
594                  * transform the mp->pg vector of vectors of paths
595                  * into a mp->params string to feed the device-mapper
596                  */
597                 if (update_mpp_paths(mpp, vecs->pathvec)) {
598                         condlog(0, "%s: failed to update paths",
599                                 mpp->alias);
600                         goto fail;
601                 }
602                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
603                         vector_del_slot(mpp->paths, i);
604
605                 /*
606                  * remove the map IFF removing the last path
607                  */
608                 if (VECTOR_SIZE(mpp->paths) == 0) {
609                         char alias[WWID_SIZE];
610
611                         /*
612                          * flush_map will fail if the device is open
613                          */
614                         strncpy(alias, mpp->alias, WWID_SIZE);
615                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
616                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
617                                 mpp->retry_tick = 0;
618                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
619                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
620                                 dm_queue_if_no_path(mpp->alias, 0);
621                         }
622                         if (!flush_map(mpp, vecs)) {
623                                 condlog(2, "%s: removed map after"
624                                         " removing all paths",
625                                         alias);
626                                 retval = 0;
627                                 goto out;
628                         }
629                         /*
630                          * Not an error, continue
631                          */
632                 }
633
634                 if (setup_map(mpp, params, PARAMS_SIZE)) {
635                         condlog(0, "%s: failed to setup map for"
636                                 " removal of path %s", mpp->alias, pp->dev);
637                         goto fail;
638                 }
639                 /*
640                  * reload the map
641                  */
642                 mpp->action = ACT_RELOAD;
643                 if (domap(mpp, params) <= 0) {
644                         condlog(0, "%s: failed in domap for "
645                                 "removal of path %s",
646                                 mpp->alias, pp->dev);
647                         retval = 1;
648                 } else {
649                         /*
650                          * update our state from kernel
651                          */
652                         if (setup_multipath(vecs, mpp)) {
653                                 goto fail;
654                         }
655                         sync_map_state(mpp);
656
657                         condlog(2, "%s [%s]: path removed from map %s",
658                                 pp->dev, pp->dev_t, mpp->alias);
659                 }
660         }
661
662 out:
663         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
664                 vector_del_slot(vecs->pathvec, i);
665
666         free_path(pp);
667
668         return retval;
669
670 fail:
671         remove_map_and_stop_waiter(mpp, vecs, 1);
672         return 1;
673 }
674
675 static int
676 uev_update_path (struct uevent *uev, struct vectors * vecs)
677 {
678         int ro, retval = 0;
679
680         ro = uevent_get_disk_ro(uev);
681
682         if (ro >= 0) {
683                 struct path * pp;
684
685                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
686                         uev->kernel, ro);
687                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
688                 if (!pp) {
689                         condlog(0, "%s: spurious uevent, path not found",
690                                 uev->kernel);
691                         return 1;
692                 }
693                 if (pp->mpp) {
694                         retval = reload_map(vecs, pp->mpp, 0);
695
696                         condlog(2, "%s: map %s reloaded (retval %d)",
697                                 uev->kernel, pp->mpp->alias, retval);
698                 }
699
700         }
701
702         return retval;
703 }
704
705 static int
706 map_discovery (struct vectors * vecs)
707 {
708         struct multipath * mpp;
709         unsigned int i;
710
711         if (dm_get_maps(vecs->mpvec))
712                 return 1;
713
714         vector_foreach_slot (vecs->mpvec, mpp, i)
715                 if (setup_multipath(vecs, mpp))
716                         return 1;
717
718         return 0;
719 }
720
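/*
 * Callback for the CLI unix socket listener: run the command with the
 * vectors lock held and provide a default "ok"/"fail" reply when the
 * handler did not produce one.
 */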
721 int
722 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
723 {
724         struct vectors * vecs;
725         int r;
726
727         *reply = NULL;
728         *len = 0;
729         vecs = (struct vectors *)trigger_data;
730
731         pthread_cleanup_push(cleanup_lock, &vecs->lock);
732         lock(vecs->lock);
733         pthread_testcancel();
734
735         r = parse_cmd(str, reply, len, vecs);
736
737         if (r > 0) {
738                 *reply = STRDUP("fail\n");
739                 *len = strlen(*reply) + 1;
740                 r = 1;
741         }
742         else if (!r && *len == 0) {
743                 *reply = STRDUP("ok\n");
744                 *len = strlen(*reply) + 1;
745                 r = 0;
746         }
747         /* else if (r < 0) leave *reply alone */
748
749         lock_cleanup_pop(vecs->lock);
750         return r;
751 }
752
753 static int
754 uev_discard(char * devpath)
755 {
756         char *tmp;
757         char a[11], b[11];
758
759         /*
760          * keep only block devices, discard partitions
761          */
762         tmp = strstr(devpath, "/block/");
763         if (tmp == NULL){
764                 condlog(4, "no /block/ in '%s'", devpath);
765                 return 1;
766         }
767         if (sscanf(tmp, "/block/%10s", a) != 1 ||
768             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
769                 condlog(4, "discard event on %s", devpath);
770                 return 1;
771         }
772         return 0;
773 }
774
775 int
776 uev_trigger (struct uevent * uev, void * trigger_data)
777 {
778         int r = 0;
779         struct vectors * vecs;
780
781         vecs = (struct vectors *)trigger_data;
782
783         if (uev_discard(uev->devpath))
784                 return 0;
785
786         pthread_cleanup_push(cleanup_lock, &vecs->lock);
787         lock(vecs->lock);
788         pthread_testcancel();
789
790         /*
791          * device map event
792          * Add events are ignored here as the tables
793          * are not fully initialised then.
794          */
795         if (!strncmp(uev->kernel, "dm-", 3)) {
796                 if (!strncmp(uev->action, "change", 6)) {
797                         r = uev_add_map(uev, vecs);
798                         goto out;
799                 }
800                 if (!strncmp(uev->action, "remove", 6)) {
801                         r = uev_remove_map(uev, vecs);
802                         goto out;
803                 }
804                 goto out;
805         }
806
807         /*
808          * path add/remove event
809          */
810         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
811                            uev->kernel) > 0)
812                 goto out;
813
814         if (!strncmp(uev->action, "add", 3)) {
815                 r = uev_add_path(uev, vecs);
816                 goto out;
817         }
818         if (!strncmp(uev->action, "remove", 6)) {
819                 r = uev_remove_path(uev, vecs);
820                 goto out;
821         }
822         if (!strncmp(uev->action, "change", 6)) {
823                 r = uev_update_path(uev, vecs);
824                 goto out;
825         }
826
827 out:
828         lock_cleanup_pop(vecs->lock);
829         return r;
830 }
831
832 static void *
833 ueventloop (void * ap)
834 {
835         block_signal(SIGUSR1, NULL);
836         block_signal(SIGHUP, NULL);
837
838         if (uevent_listen())
839                 condlog(0, "error starting uevent listener");
840
841         return NULL;
842 }
843
844 static void *
845 uevqloop (void * ap)
846 {
847         block_signal(SIGUSR1, NULL);
848         block_signal(SIGHUP, NULL);
849
850         if (uevent_dispatch(&uev_trigger, ap))
851                 condlog(0, "error starting uevent dispatcher");
852
853         return NULL;
854 }
855 static void *
856 uxlsnrloop (void * ap)
857 {
858         block_signal(SIGUSR1, NULL);
859         block_signal(SIGHUP, NULL);
860
861         if (cli_init())
862                 return NULL;
863
864         set_handler_callback(LIST+PATHS, cli_list_paths);
865         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
866         set_handler_callback(LIST+MAPS, cli_list_maps);
867         set_handler_callback(LIST+STATUS, cli_list_status);
868         set_handler_callback(LIST+DAEMON, cli_list_daemon);
869         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
870         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
871         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
872         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
873         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
874         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
875         set_handler_callback(LIST+CONFIG, cli_list_config);
876         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
877         set_handler_callback(LIST+DEVICES, cli_list_devices);
878         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
879         set_handler_callback(ADD+PATH, cli_add_path);
880         set_handler_callback(DEL+PATH, cli_del_path);
881         set_handler_callback(ADD+MAP, cli_add_map);
882         set_handler_callback(DEL+MAP, cli_del_map);
883         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
884         set_handler_callback(RECONFIGURE, cli_reconfigure);
885         set_handler_callback(SUSPEND+MAP, cli_suspend);
886         set_handler_callback(RESUME+MAP, cli_resume);
887         set_handler_callback(RESIZE+MAP, cli_resize);
888         set_handler_callback(RELOAD+MAP, cli_reload);
889         set_handler_callback(RESET+MAP, cli_reassign);
890         set_handler_callback(REINSTATE+PATH, cli_reinstate);
891         set_handler_callback(FAIL+PATH, cli_fail);
892         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
893         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
894         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
895         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
896         set_handler_callback(QUIT, cli_quit);
897         set_handler_callback(SHUTDOWN, cli_shutdown);
898         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
899         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
900         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
901
902         umask(077);
903         uxsock_listen(&uxsock_trigger, ap);
904
905         return NULL;
906 }
907
908 int
909 exit_daemon (int status)
910 {
911         if (status != 0)
912                 fprintf(stderr, "bad exit status. see daemon.log\n");
913
914         if (running_state != DAEMON_SHUTDOWN) {
915                 pthread_mutex_lock(&exit_mutex);
916                 pthread_cond_signal(&exit_cond);
917                 pthread_mutex_unlock(&exit_mutex);
918         }
919         return status;
920 }
921
922 const char *
923 daemon_status(void)
924 {
925         switch (running_state) {
926         case DAEMON_INIT:
927                 return "init";
928         case DAEMON_START:
929                 return "startup";
930         case DAEMON_CONFIGURE:
931                 return "configure";
932         case DAEMON_RUNNING:
933                 return "running";
934         case DAEMON_SHUTDOWN:
935                 return "shutdown";
936         }
937         return NULL;
938 }
939
940 static void
941 fail_path (struct path * pp, int del_active)
942 {
943         if (!pp->mpp)
944                 return;
945
946         condlog(2, "checker failed path %s in map %s",
947                  pp->dev_t, pp->mpp->alias);
948
949         dm_fail_path(pp->mpp->alias, pp->dev_t);
950         if (del_active)
951                 update_queue_mode_del_path(pp->mpp);
952 }
953
954 /*
955  * caller must have locked the path list before calling that function
956  */
957 static void
958 reinstate_path (struct path * pp, int add_active)
959 {
960         if (!pp->mpp)
961                 return;
962
963         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
964                 condlog(0, "%s: reinstate failed", pp->dev_t);
965         else {
966                 condlog(2, "%s: reinstated", pp->dev_t);
967                 if (add_active)
968                         update_queue_mode_add_path(pp->mpp);
969         }
970 }
971
972 static void
973 enable_group(struct path * pp)
974 {
975         struct pathgroup * pgp;
976
977         /*
978          * if path is added through uev_add_path, pgindex can be unset.
979          * the next update_multipath_strings() will set it, upon map reload event.
980          *
981          * we can safely return here, because upon map reload, all
982          * PG will be enabled.
983          */
984         if (!pp->mpp->pg || !pp->pgindex)
985                 return;
986
987         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
988
989         if (pgp->status == PGSTATE_DISABLED) {
990                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
991                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
992         }
993 }
994
995 static void
996 mpvec_garbage_collector (struct vectors * vecs)
997 {
998         struct multipath * mpp;
999         unsigned int i;
1000
1001         if (!vecs->mpvec)
1002                 return;
1003
1004         vector_foreach_slot (vecs->mpvec, mpp, i) {
1005                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
1006                         condlog(2, "%s: remove dead map", mpp->alias);
1007                         remove_map_and_stop_waiter(mpp, vecs, 1);
1008                         i--;
1009                 }
1010         }
1011 }
1012
1013 /* This is called after a path has started working again. If the multipath
1014  * device for this path uses the followover failback type, and this is the
1015  * best pathgroup, and this is the first path in the pathgroup to come back
1016  * up, then switch to this pathgroup */
1017 static int
1018 followover_should_failback(struct path * pp)
1019 {
1020         struct pathgroup * pgp;
1021         struct path *pp1;
1022         int i;
1023
1024         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1025             !pp->mpp->pg || !pp->pgindex ||
1026             pp->pgindex != pp->mpp->bestpg)
1027                 return 0;
1028
1029         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1030         vector_foreach_slot(pgp->paths, pp1, i) {
1031                 if (pp1 == pp)
1032                         continue;
1033                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1034                         return 0;
1035         }
1036         return 1;
1037 }
1038
1039 static void
1040 defered_failback_tick (vector mpvec)
1041 {
1042         struct multipath * mpp;
1043         unsigned int i;
1044
1045         vector_foreach_slot (mpvec, mpp, i) {
1046                 /*
1047                  * deferred failback getting closer
1048                  */
1049                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1050                         mpp->failback_tick--;
1051
1052                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1053                                 switch_pathgroup(mpp);
1054                 }
1055         }
1056 }
1057
1058 static void
1059 retry_count_tick(vector mpvec)
1060 {
1061         struct multipath *mpp;
1062         unsigned int i;
1063
1064         vector_foreach_slot (mpvec, mpp, i) {
1065                 if (mpp->retry_tick) {
1066                         mpp->stat_total_queueing_time++;
1067                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1068                         if(--mpp->retry_tick == 0) {
1069                                 dm_queue_if_no_path(mpp->alias, 0);
1070                                 condlog(2, "%s: Disable queueing", mpp->alias);
1071                         }
1072                 }
1073         }
1074 }
1075
1076 int update_prio(struct path *pp, int refresh_all)
1077 {
1078         int oldpriority;
1079         struct path *pp1;
1080         struct pathgroup * pgp;
1081         int i, j, changed = 0;
1082
1083         if (refresh_all) {
1084                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1085                         vector_foreach_slot (pgp->paths, pp1, j) {
1086                                 oldpriority = pp1->priority;
1087                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1088                                 if (pp1->priority != oldpriority)
1089                                         changed = 1;
1090                         }
1091                 }
1092                 return changed;
1093         }
1094         oldpriority = pp->priority;
1095         pathinfo(pp, conf->hwtable, DI_PRIO);
1096
1097         if (pp->priority == oldpriority)
1098                 return 0;
1099         return 1;
1100 }
1101
1102 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1103 {
1104         if (reload_map(vecs, mpp, refresh))
1105                 return 1;
1106
1107         dm_lib_release();
1108         if (setup_multipath(vecs, mpp) != 0)
1109                 return 1;
1110         sync_map_state(mpp);
1111
1112         return 0;
1113 }
1114
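/*
 * Run one checker iteration for a path: honour the per-path tick,
 * query the current state, synchronize with the kernel map, fail or
 * reinstate the path on state changes, adapt the check interval and
 * schedule failback or path group switches as needed.
 */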
1115 void
1116 check_path (struct vectors * vecs, struct path * pp)
1117 {
1118         int newstate;
1119         int new_path_up = 0;
1120         int chkr_new_path_up = 0;
1121         int oldchkrstate = pp->chkrstate;
1122
1123         if (!pp->mpp)
1124                 return;
1125
1126         if (pp->tick && --pp->tick)
1127                 return; /* don't check this path yet */
1128
1129         /*
1130          * provision a next check soonest,
1131          * in case we exit abnormally from here
1132          */
1133         pp->tick = conf->checkint;
1134
1135         newstate = path_offline(pp);
1136         if (newstate == PATH_UP)
1137                 newstate = get_state(pp, 1);
1138
1139         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1140                 condlog(2, "%s: unusable path", pp->dev);
1141                 pathinfo(pp, conf->hwtable, 0);
1142                 return;
1143         }
1144         /*
1145          * Async IO in flight. Keep the previous path state
1146          * and reschedule as soon as possible
1147          */
1148         if (newstate == PATH_PENDING) {
1149                 pp->tick = 1;
1150                 return;
1151         }
1152         /*
1153          * Synchronize with kernel state
1154          */
1155         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1156                 condlog(1, "%s: Could not synchronize with kernel state",
1157                         pp->dev);
1158                 pp->dmstate = PSTATE_UNDEF;
1159         }
1160         pp->chkrstate = newstate;
1161         if (newstate != pp->state) {
1162                 int oldstate = pp->state;
1163                 pp->state = newstate;
1164                 LOG_MSG(1, checker_message(&pp->checker));
1165
1166                 /*
1167                  * upon state change, reset the checkint
1168                  * to the shortest delay
1169                  */
1170                 pp->checkint = conf->checkint;
1171
1172                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1173                         /*
1174                          * proactively fail path in the DM
1175                          */
1176                         if (oldstate == PATH_UP ||
1177                             oldstate == PATH_GHOST)
1178                                 fail_path(pp, 1);
1179                         else
1180                                 fail_path(pp, 0);
1181
1182                         /*
1183                          * cancel scheduled failback
1184                          */
1185                         pp->mpp->failback_tick = 0;
1186
1187                         pp->mpp->stat_path_failures++;
1188                         return;
1189                 }
1190
1191                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1192                         if (pp->mpp && pp->mpp->prflag) {
1193                                 /*
1194                                  * Check Persistent Reservation.
1195                                  */
1196                                 condlog(2, "%s: checking persistent reservation "
1197                                         "registration", pp->dev);
1198                                 mpath_pr_event_handle(pp);
1199                         }
1200                 }
1201
1202                 /*
1203                  * reinstate this path
1204                  */
1205                 if (oldstate != PATH_UP &&
1206                     oldstate != PATH_GHOST)
1207                         reinstate_path(pp, 1);
1208                 else
1209                         reinstate_path(pp, 0);
1210
1211                 new_path_up = 1;
1212
1213                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1214                         chkr_new_path_up = 1;
1215
1216                 /*
1217                  * if at least one path is up in a group, and
1218                  * the group is disabled, re-enable it
1219                  */
1220                 if (newstate == PATH_UP)
1221                         enable_group(pp);
1222         }
1223         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1224                 if (pp->dmstate == PSTATE_FAILED ||
1225                     pp->dmstate == PSTATE_UNDEF) {
1226                         /* Clear IO errors */
1227                         reinstate_path(pp, 0);
1228                 } else {
1229                         LOG_MSG(4, checker_message(&pp->checker));
1230                         if (pp->checkint != conf->max_checkint) {
1231                                 /*
1232                                  * double the next check delay.
1233                                  * max at conf->max_checkint
1234                                  */
1235                                 if (pp->checkint < (conf->max_checkint / 2))
1236                                         pp->checkint = 2 * pp->checkint;
1237                                 else
1238                                         pp->checkint = conf->max_checkint;
1239
1240                                 condlog(4, "%s: delay next check %is",
1241                                         pp->dev_t, pp->checkint);
1242                         }
1243                         pp->tick = pp->checkint;
1244                 }
1245         }
1246         else if (newstate == PATH_DOWN) {
1247                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1248                         LOG_MSG(3, checker_message(&pp->checker));
1249                 else
1250                         LOG_MSG(2, checker_message(&pp->checker));
1251         }
1252
1253         pp->state = newstate;
1254
1255         /*
1256          * path prio refreshing
1257          */
1258         condlog(4, "path prio refresh");
1259
1260         if (update_prio(pp, new_path_up) &&
1261             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1262              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1263                 update_path_groups(pp->mpp, vecs, !new_path_up);
1264         else if (need_switch_pathgroup(pp->mpp, 0)) {
1265                 if (pp->mpp->pgfailback > 0 &&
1266                     (new_path_up || pp->mpp->failback_tick <= 0))
1267                         pp->mpp->failback_tick =
1268                                 pp->mpp->pgfailback + 1;
1269                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1270                          (chkr_new_path_up && followover_should_failback(pp)))
1271                         switch_pathgroup(pp->mpp);
1272         }
1273 }
1274
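/*
 * Path checker thread: once per second walk all paths, drive the
 * deferred failback and no_path_retry counters, and periodically
 * garbage-collect maps that have disappeared from device-mapper.
 */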
1275 static void *
1276 checkerloop (void *ap)
1277 {
1278         struct vectors *vecs;
1279         struct path *pp;
1280         int count = 0;
1281         unsigned int i;
1282         sigset_t old;
1283
1284         mlockall(MCL_CURRENT | MCL_FUTURE);
1285         vecs = (struct vectors *)ap;
1286         condlog(2, "path checkers start up");
1287
1288         /*
1289          * init the path check interval
1290          */
1291         vector_foreach_slot (vecs->pathvec, pp, i) {
1292                 pp->checkint = conf->checkint;
1293         }
1294
1295         while (1) {
1296                 block_signal(SIGHUP, &old);
1297                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1298                 lock(vecs->lock);
1299                 pthread_testcancel();
1300                 condlog(4, "tick");
1301
1302                 if (vecs->pathvec) {
1303                         vector_foreach_slot (vecs->pathvec, pp, i) {
1304                                 check_path(vecs, pp);
1305                         }
1306                 }
1307                 if (vecs->mpvec) {
1308                         defered_failback_tick(vecs->mpvec);
1309                         retry_count_tick(vecs->mpvec);
1310                 }
1311                 if (count)
1312                         count--;
1313                 else {
1314                         condlog(4, "map garbage collection");
1315                         mpvec_garbage_collector(vecs);
1316                         count = MAPGCINT;
1317                 }
1318
1319                 lock_cleanup_pop(vecs->lock);
1320                 pthread_sigmask(SIG_SETMASK, &old, NULL);
1321                 sleep(1);
1322         }
1323         return NULL;
1324 }
1325
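/*
 * Discover paths and maps, drop blacklisted paths, coalesce the
 * remaining paths into maps, push the result to device-mapper and
 * optionally start a dm event waiter thread per map.
 */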
1326 int
1327 configure (struct vectors * vecs, int start_waiters)
1328 {
1329         struct multipath * mpp;
1330         struct path * pp;
1331         vector mpvec;
1332         int i;
1333
1334         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1335                 return 1;
1336
1337         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1338                 return 1;
1339
1340         if (!(mpvec = vector_alloc()))
1341                 return 1;
1342
1343         /*
1344          * probe for current path (from sysfs) and map (from dm) sets
1345          */
1346         path_discovery(vecs->pathvec, conf, DI_ALL);
1347
1348         vector_foreach_slot (vecs->pathvec, pp, i){
1349                 if (filter_path(conf, pp) > 0){
1350                         vector_del_slot(vecs->pathvec, i);
1351                         free_path(pp);
1352                         i--;
1353                 }
1354                 else
1355                         pp->checkint = conf->checkint;
1356         }
1357         if (map_discovery(vecs))
1358                 return 1;
1359
1360         /*
1361          * create new set of maps & push changed ones into dm
1362          */
1363         if (coalesce_paths(vecs, mpvec, NULL, 1))
1364                 return 1;
1365
1366         /*
1367          * may need to remove some maps which are no longer relevant
1368          * e.g., due to blacklist changes in conf file
1369          */
1370         if (coalesce_maps(vecs, mpvec))
1371                 return 1;
1372
1373         dm_lib_release();
1374
1375         sync_maps_state(mpvec);
1376         vector_foreach_slot(mpvec, mpp, i){
1377                 remember_wwid(mpp->wwid);
1378                 update_map_pr(mpp);
1379         }
1380
1381         /*
1382          * purge dm of old maps
1383          */
1384         remove_maps(vecs);
1385
1386         /*
1387          * save new set of maps formed by considering current path state
1388          */
1389         vector_free(vecs->mpvec);
1390         vecs->mpvec = mpvec;
1391
1392         /*
1393          * start dm event waiter threads for these new maps
1394          */
1395         vector_foreach_slot(vecs->mpvec, mpp, i) {
1396                 if (setup_multipath(vecs, mpp))
1397                         return 1;
1398                 if (start_waiters)
1399                         if (start_waiter_thread(mpp, vecs))
1400                                 return 1;
1401         }
1402         return 0;
1403 }
1404
1405 int
1406 reconfigure (struct vectors * vecs)
1407 {
1408         struct config * old = conf;
1409         int retval = 1;
1410
1411         /*
1412          * free old map and path vectors ... they use old conf state
1413          */
1414         if (VECTOR_SIZE(vecs->mpvec))
1415                 remove_maps_and_stop_waiters(vecs);
1416
1417         if (VECTOR_SIZE(vecs->pathvec))
1418                 free_pathvec(vecs->pathvec, FREE_PATHS);
1419
1420         vecs->pathvec = NULL;
1421         conf = NULL;
1422
1423         if (!load_config(DEFAULT_CONFIGFILE)) {
1424                 conf->verbosity = old->verbosity;
1425                 conf->daemon = 1;
1426                 configure(vecs, 1);
1427                 free_config(old);
1428                 retval = 0;
1429         }
1430
1431         return retval;
1432 }
1433
1434 static struct vectors *
1435 init_vecs (void)
1436 {
1437         struct vectors * vecs;
1438
1439         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1440
1441         if (!vecs)
1442                 return NULL;
1443
1444         vecs->lock.mutex =
1445                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1446
1447         if (!vecs->lock.mutex)
1448                 goto out;
1449
1450         pthread_mutex_init(vecs->lock.mutex, NULL);
1451         vecs->lock.depth = 0;
1452
1453         return vecs;
1454
1455 out:
1456         FREE(vecs);
1457         condlog(0, "failed to init paths");
1458         return NULL;
1459 }
1460
1461 static void *
1462 signal_set(int signo, void (*func) (int))
1463 {
1464         int r;
1465         struct sigaction sig;
1466         struct sigaction osig;
1467
1468         sig.sa_handler = func;
1469         sigemptyset(&sig.sa_mask);
1470         sig.sa_flags = 0;
1471
1472         r = sigaction(signo, &sig, &osig);
1473
1474         if (r < 0)
1475                 return (SIG_ERR);
1476         else
1477                 return (osig.sa_handler);
1478 }
1479
1480 static void
1481 sighup (int sig)
1482 {
1483         condlog(2, "reconfigure (SIGHUP)");
1484
1485         if (running_state != DAEMON_RUNNING)
1486                 return;
1487
1488         reconfigure(gvecs);
1489
1490 #ifdef _DEBUG_
1491         dbg_free_final(NULL);
1492 #endif
1493 }
1494
1495 static void
1496 sigend (int sig)
1497 {
1498         exit_daemon(0);
1499 }
1500
1501 static void
1502 sigusr1 (int sig)
1503 {
1504         condlog(3, "SIGUSR1 received");
1505 }
1506
1507 static void
1508 signal_init(void)
1509 {
1510         signal_set(SIGHUP, sighup);
1511         signal_set(SIGUSR1, sigusr1);
1512         signal_set(SIGINT, sigend);
1513         signal_set(SIGTERM, sigend);
1514         signal(SIGPIPE, SIG_IGN);
1515 }
1516
1517 static void
1518 setscheduler (void)
1519 {
1520         int res;
1521         static struct sched_param sched_param = {
1522                 .sched_priority = 99
1523         };
1524
1525         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1526
1527         if (res == -1)
1528                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1529         return;
1530 }
1531
1532 static void
1533 set_oom_adj (void)
1534 {
1535 #ifdef OOM_SCORE_ADJ_MIN
1536         int retry = 1;
1537         char *file = "/proc/self/oom_score_adj";
1538         int score = OOM_SCORE_ADJ_MIN;
1539 #else
1540         int retry = 0;
1541         char *file = "/proc/self/oom_adj";
1542         int score = OOM_ADJUST_MIN;
1543 #endif
1544         FILE *fp;
1545         struct stat st;
1546
1547         do {
1548                 if (stat(file, &st) == 0){
1549                         fp = fopen(file, "w");
1550                         if (!fp) {
1551                                 condlog(0, "couldn't fopen %s : %s", file,
1552                                         strerror(errno));
1553                                 return;
1554                         }
1555                         fprintf(fp, "%i", score);
1556                         fclose(fp);
1557                         return;
1558                 }
1559                 if (errno != ENOENT) {
1560                         condlog(0, "couldn't stat %s : %s", file,
1561                                 strerror(errno));
1562                         return;
1563                 }
1564 #ifdef OOM_ADJUST_MIN
1565                 file = "/proc/self/oom_adj";
1566                 score = OOM_ADJUST_MIN;
1567 #else
1568                 retry = 0;
1569 #endif
1570         } while (retry--);
1571         condlog(0, "couldn't adjust oom score");
1572 }
1573
1574 static int
1575 child (void * param)
1576 {
1577         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1578         pthread_attr_t log_attr, misc_attr;
1579         struct vectors * vecs;
1580         struct multipath * mpp;
1581         int i;
1582         sigset_t set;
1583         int rc, pid_rc;
1584
1585         mlockall(MCL_CURRENT | MCL_FUTURE);
1586
1587         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1588         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1589
1590         if (logsink) {
1591                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1592                 log_thread_start(&log_attr);
1593                 pthread_attr_destroy(&log_attr);
1594         }
1595
1596         running_state = DAEMON_START;
1597
1598         condlog(2, "--------start up--------");
1599         condlog(2, "read " DEFAULT_CONFIGFILE);
1600
1601         if (load_config(DEFAULT_CONFIGFILE))
1602                 exit(1);
1603
1604         if (init_checkers()) {
1605                 condlog(0, "failed to initialize checkers");
1606                 exit(1);
1607         }
1608         if (init_prio()) {
1609                 condlog(0, "failed to initialize prioritizers");
1610                 exit(1);
1611         }
1612
1613         setlogmask(LOG_UPTO(conf->verbosity + 3));
1614
1615         if (conf->max_fds) {
1616                 struct rlimit fd_limit;
1617
1618                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1619                         condlog(0, "can't get open fds limit: %s",
1620                                 strerror(errno));
1621                         fd_limit.rlim_cur = 0;
1622                         fd_limit.rlim_max = 0;
1623                 }
1624                 if (fd_limit.rlim_cur < conf->max_fds) {
1625                         fd_limit.rlim_cur = conf->max_fds;
1626                         if (fd_limit.rlim_max < conf->max_fds)
1627                                 fd_limit.rlim_max = conf->max_fds;
1628                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1629                                 condlog(0, "can't set open fds limit to "
1630                                         "%lu/%lu : %s",
1631                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1632                                         strerror(errno));
1633                         } else {
1634                                 condlog(3, "set open fds limit to %lu/%lu",
1635                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1636                         }
1637                 }
1638
1639         }
1640
1641         vecs = gvecs = init_vecs();
1642         if (!vecs)
1643                 exit(1);
1644
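        /*
         * Set up signal handling, the scheduling policy and OOM-killer
         * protection before any maps are configured.
         */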
1645         signal_init();
1646         setscheduler();
1647         set_oom_adj();
1648
1649         conf->daemon = 1;
1650         udev_set_sync_support(0);
1651         /*
1652          * Start uevent listener early to catch events
1653          */
1654         if ((rc = pthread_create(&uevent_thr, &misc_attr, ueventloop, vecs))) {
1655                 condlog(0, "failed to create uevent thread: %d", rc);
1656                 exit(1);
1657         }
1658         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1659                 condlog(0, "failed to create cli listener: %d", rc);
1660                 exit(1);
1661         }
1662         /*
1663          * fetch and configure both paths and multipaths
1664          */
1665         running_state = DAEMON_CONFIGURE;
1666
1667         lock(vecs->lock);
1668         if (configure(vecs, 1)) {
1669                 unlock(vecs->lock);
1670                 condlog(0, "failure during configuration");
1671                 exit(1);
1672         }
1673         unlock(vecs->lock);
1674
1675         /*
1676          * start threads
1677          */
1678         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1679                 condlog(0,"failed to create checker loop thread: %d", rc);
1680                 exit(1);
1681         }
1682         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1683                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1684                 exit(1);
1685         }
1686         pthread_attr_destroy(&misc_attr);
1687
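        /*
         * Take exit_mutex before declaring DAEMON_RUNNING so that an exit
         * request cannot be signalled between setting the state and
         * blocking in pthread_cond_wait() below.
         */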
1688         pthread_mutex_lock(&exit_mutex);
1689         /* Startup complete, create pidfile */
1690         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1691         /* Ignore errors, we can live without */
1692
1693         running_state = DAEMON_RUNNING;
1694         pthread_cond_wait(&exit_cond, &exit_mutex);
1695         /* Need to block these to avoid deadlocking */
1696         sigemptyset(&set);
1697         sigaddset(&set, SIGTERM);
1698         sigaddset(&set, SIGINT);
1699         pthread_sigmask(SIG_BLOCK, &set, NULL);
1700
1701         /*
1702          * exit path
1703          */
1704         running_state = DAEMON_SHUTDOWN;
1705         pthread_sigmask(SIG_UNBLOCK, &set, NULL);
1706         block_signal(SIGHUP, NULL);
1707         lock(vecs->lock);
1708         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1709                 vector_foreach_slot(vecs->mpvec, mpp, i)
1710                         dm_queue_if_no_path(mpp->alias, 0);
1711         remove_maps_and_stop_waiters(vecs);
1712         unlock(vecs->lock);
1713
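        /* stop the checker, uevent, CLI listener and dispatcher threads */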
1714         pthread_cancel(check_thr);
1715         pthread_cancel(uevent_thr);
1716         pthread_cancel(uxlsnr_thr);
1717         pthread_cancel(uevq_thr);
1718
1719         lock(vecs->lock);
1720         free_pathvec(vecs->pathvec, FREE_PATHS);
1721         vecs->pathvec = NULL;
1722         unlock(vecs->lock);
1723         /* Now all the waitevent threads will start rushing in. */
1724         while (vecs->lock.depth > 0) {
1725                 sleep (1); /* This is weak. */
1726                 condlog(3, "Have %d wait event checker threads to de-alloc,"
1727                         " waiting...", vecs->lock.depth);
1728         }
1729         pthread_mutex_destroy(vecs->lock.mutex);
1730         FREE(vecs->lock.mutex);
1731         vecs->lock.depth = 0;
1732         vecs->lock.mutex = NULL;
1733         FREE(vecs);
1734         vecs = NULL;
1735
1736         cleanup_checkers();
1737         cleanup_prio();
1738
1739         dm_lib_release();
1740         dm_lib_exit();
1741
1742         /* We're done here */
1743         if (!pid_rc) {
1744                 condlog(3, "unlink pidfile");
1745                 unlink(DEFAULT_PIDFILE);
1746         }
1747
1748         condlog(2, "--------shut down-------");
1749
1750         if (logsink)
1751                 log_thread_stop();
1752
1753         /*
1754          * Freeing config must be done after condlog() and dm_lib_exit(),
1755          * because logging functions like dlog() and dm_write_log()
1756          * reference the config.
1757          */
1758         free_config(conf);
1759         conf = NULL;
1760
1761 #ifdef _DEBUG_
1762         dbg_free_final(NULL);
1763 #endif
1764
1765         exit(0);
1766 }
1767
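/*
 * Classic double fork: the first child calls setsid() to drop the
 * controlling terminal, the second fork prevents reacquiring one, and
 * stdin/stdout/stderr are redirected to /dev/null.  The parent gets the
 * first child's pid back as the return value; the surviving child
 * records its pid in daemon_pid and returns 0.
 */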
1768 static int
1769 daemonize(void)
1770 {
1771         int pid;
1772         int dev_null_fd;
1773
1774         if( (pid = fork()) < 0){
1775                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1776                 return -1;
1777         }
1778         else if (pid != 0)
1779                 return pid;
1780
1781         setsid();
1782
1783         if ( (pid = fork()) < 0)
1784                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1785         else if (pid != 0)
1786                 _exit(0);
1787
1788         if (chdir("/") < 0)
1789                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1790
1791         dev_null_fd = open("/dev/null", O_RDWR);
1792         if (dev_null_fd < 0){
1793                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1794                         strerror(errno));
1795                 _exit(0);
1796         }
1797
1798         close(STDIN_FILENO);
1799         if (dup(dev_null_fd) < 0) {
1800                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1801                         strerror(errno));
1802                 _exit(0);
1803         }
1804         close(STDOUT_FILENO);
1805         if (dup(dev_null_fd) < 0) {
1806                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1807                         strerror(errno));
1808                 _exit(0);
1809         }
1810         close(STDERR_FILENO);
1811         if (dup(dev_null_fd) < 0) {
1812                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1813                         strerror(errno));
1814                 _exit(0);
1815         }
1816         close(dev_null_fd);
1817         daemon_pid = getpid();
1818         return 0;
1819 }
1820
1821 int
1822 main (int argc, char *argv[])
1823 {
1824         extern char *optarg;
1825         extern int optind;
1826         int arg;
1827         int err;
1828
1829         logsink = 1;
1830         running_state = DAEMON_INIT;
1831         dm_init();
1832
1833         if (getuid() != 0) {
1834                 fprintf(stderr, "need to be root\n");
1835                 exit(1);
1836         }
1837
1838         /* make sure we don't lock any path */
1839         if (chdir("/") < 0)
1840                 fprintf(stderr, "can't chdir to root directory : %s\n",
1841                         strerror(errno));
1842         umask(umask(077) | 022);
1843
1844         conf = alloc_config();
1845
1846         if (!conf)
1847                 exit(1);
1848
1849         while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1850         switch(arg) {
1851                 case 'd':
1852                         logsink = 0;
1853                         //debug=1; /* ### comment me out ### */
1854                         break;
1855                 case 'v':
1856                         if (!optarg || !isdigit(optarg[0]))
1858                                 exit(1);
1859
1860                         conf->verbosity = atoi(optarg);
1861                         break;
1862                 case 'k':
1863                         uxclnt(optarg);
1864                         exit(0);
1865                 default:
1866                         ;
1867                 }
1868         }
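        /*
         * Remaining non-option arguments are joined into a single command
         * line and handed to a running daemon through uxclnt().
         */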
1869         if (optind < argc) {
1870                 char cmd[CMDSIZE];
1871                 char * s = cmd;
1872                 char * c = s;
1873
1874                 while (optind < argc) {
1875                         if (strchr(argv[optind], ' '))
1876                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1877                         else
1878                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1879                         optind++;
1880                 }
1881                 c += snprintf(c, s + CMDSIZE - c, "\n");
1882                 uxclnt(s);
1883                 exit(0);
1884         }
1885
1886         if (!logsink)
1887                 err = 0;
1888         else
1889                 err = daemonize();
1890
1891         if (err < 0)
1892                 /* error */
1893                 exit(1);
1894         else if (err > 0)
1895                 /* parent dies */
1896                 exit(0);
1897         else
1898                 /* child lives */
1899                 return (child(NULL));
1900 }
1901
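/*
 * Persistent-reservation event handler, run in its own thread: read the
 * registered keys from the path (PR IN, READ KEYS), check whether the
 * map's reservation_key is among them, and if so re-register it on this
 * path with PR OUT, REGISTER AND IGNORE EXISTING KEY.
 */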
1902 void *  mpath_pr_event_handler_fn (void * pathp )
1903 {
1904         struct multipath * mpp;
1905         int i,j, ret, isFound;
1906         struct path * pp = (struct path *)pathp;
1907         unsigned char *keyp;
1908         uint64_t prkey;
1909         struct prout_param_descriptor *param;
1910         struct prin_resp *resp;
1911
1912         mpp = pp->mpp;
1913
1914         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1915         if (!resp){
1916                 condlog(0, "%s: alloc failed for prin response", pp->dev);
1917                 return NULL;
1918         }
1919
1920         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1921         if (ret != MPATH_PR_SUCCESS )
1922         {
1923                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1924                 goto out;
1925         }
1926
1927         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
1928                         resp->prin_descriptor.prin_readkeys.additional_length );
1929
1930         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1931         {
1932                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1933                 ret = MPATH_PR_SUCCESS;
1934                 goto out;
1935         }
1936         prkey = 0;
1937         keyp = (unsigned char *)mpp->reservation_key;
1938         for (j = 0; j < 8; ++j) {
1939                 if (j > 0)
1940                         prkey <<= 8;
1941                 prkey |= *keyp;
1942                 ++keyp;
1943         }
1944         condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);
1945
1946         isFound = 0;
1947         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1948         {
1949                 condlog(2, "PR IN READKEYS[%d]  reservation key:",i);
1950                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1951                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1952                 {
1953                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1954                         isFound =1;
1955                         break;
1956                 }
1957         }
1958         if (!isFound)
1959         {
1960                 condlog(0, "%s: device is not registered or host is not "
1961                         "authorised for registration, skipping path", pp->dev);
1962                 ret = MPATH_PR_OTHER;
1963                 goto out;
1964         }
1965
1966         param = calloc(1, sizeof(struct prout_param_descriptor));
1967         if (!param)
                goto out;
1968
1969         for (j = 7; j >= 0; --j) {
1970                 param->sa_key[j] = (prkey & 0xff);
1971                 prkey >>= 8;
1972         }
1973         param->num_transportid = 0;
1974
1975         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1976
1977         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1978         if (ret != MPATH_PR_SUCCESS )
1979         {
1980                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
1981         }
1982         mpp->prflag = 1;
1983
1984         free(param);
1985 out:
1986         free(resp);
1987         return NULL;
1988 }
1989
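/*
 * Run the PR event handler for a path that has just come up.  The work
 * is done in a separate (immediately joined) thread; paths belonging to
 * maps without a reservation_key are ignored.
 */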
1990 int mpath_pr_event_handle(struct path *pp)
1991 {
1992         pthread_t thread;
1993         int rc;
1994         pthread_attr_t attr;
1995         struct multipath * mpp;
1996
1997         mpp = pp->mpp;
1998
1999         if (!mpp->reservation_key)
2000                 return -1;
2001
2002         pthread_attr_init(&attr);
2003         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2004
2005         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2006         pthread_attr_destroy(&attr);
2007         if (rc) {
2008                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2009                 return -1;
2010         }
2011         rc = pthread_join(thread, NULL);
2012         return 0;
2013 }
2014