multipath-tools: multipathd/main.c
1 /*
2  * Copyright (c) 2004, 2005 Christophe Varoqui
3  * Copyright (c) 2005 Kiyoshi Ueda, NEC
4  * Copyright (c) 2005 Benjamin Marzinski, Redhat
5  * Copyright (c) 2005 Edward Goggin, EMC
6  */
7 #include <unistd.h>
8 #include <sys/stat.h>
9 #include <libdevmapper.h>
10 #include <wait.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <limits.h>
18 #include <linux/oom.h>
19 #include <libudev.h>
20 #include <semaphore.h>
21 #include <mpath_persist.h>
22
23 /*
24  * libcheckers
25  */
26 #include <checkers.h>
27
28 /*
29  * libmultipath
30  */
31 #include <parser.h>
32 #include <vector.h>
33 #include <memory.h>
34 #include <config.h>
35 #include <util.h>
36 #include <hwtable.h>
37 #include <defaults.h>
38 #include <structs.h>
39 #include <blacklist.h>
40 #include <structs_vec.h>
41 #include <dmparser.h>
42 #include <devmapper.h>
43 #include <sysfs.h>
44 #include <dict.h>
45 #include <discovery.h>
46 #include <debug.h>
47 #include <propsel.h>
48 #include <uevent.h>
49 #include <switchgroup.h>
50 #include <print.h>
51 #include <configure.h>
52 #include <prio.h>
53 #include <pgpolicies.h>
54 #include <uevent.h>
55 #include <log.h>
56
57 #include "main.h"
58 #include "pidfile.h"
59 #include "uxlsnr.h"
60 #include "uxclnt.h"
61 #include "cli.h"
62 #include "cli_handlers.h"
63 #include "lock.h"
64 #include "waiter.h"
65 #include "wwids.h"
66
67 #define FILE_NAME_SIZE 256
68 #define CMDSIZE 160
69
70 #define LOG_MSG(a, b) \
71 do { \
72         if (pp->offline) \
73                 condlog(a, "%s: %s - path offline", pp->mpp->alias, pp->dev); \
74         else if (strlen(b)) \
75                 condlog(a, "%s: %s - %s", pp->mpp->alias, pp->dev, b); \
76 } while(0)
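
/*
 * Illustrative usage (mirrors the calls in check_path() below):
 *
 *     LOG_MSG(1, checker_message(&pp->checker));
 *
 * This logs "<alias>: <dev> - <message>" at verbosity 1, logs a
 * "path offline" note instead when pp->offline is set, and logs
 * nothing if the message string is empty.
 */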
77
78 struct mpath_event_param
79 {
80         char * devname;
81         struct multipath *mpp;
82 };
83
84 unsigned int mpath_mx_alloc_len;
85
86 int logsink;
87 enum daemon_status running_state;
88 pid_t daemon_pid;
89
90 static sem_t exit_sem;
91 /*
92  * global copy of vecs for use in sig handlers
93  */
94 struct vectors * gvecs;
95
96 struct udev * udev;
97
98 static int
99 need_switch_pathgroup (struct multipath * mpp, int refresh)
100 {
101         struct pathgroup * pgp;
102         struct path * pp;
103         unsigned int i, j;
104
105         if (!mpp || mpp->pgfailback == -FAILBACK_MANUAL)
106                 return 0;
107
108         /*
109          * Refresh path priority values
110          */
111         if (refresh)
112                 vector_foreach_slot (mpp->pg, pgp, i)
113                         vector_foreach_slot (pgp->paths, pp, j)
114                                 pathinfo(pp, conf->hwtable, DI_PRIO);
115
116         mpp->bestpg = select_path_group(mpp);
117
118         if (mpp->bestpg != mpp->nextpg)
119                 return 1;
120
121         return 0;
122 }
123
124 static void
125 switch_pathgroup (struct multipath * mpp)
126 {
127         mpp->stat_switchgroup++;
128         dm_switchgroup(mpp->alias, mpp->bestpg);
129         condlog(2, "%s: switch to path group #%i",
130                  mpp->alias, mpp->bestpg);
131 }
132
133 static int
134 coalesce_maps(struct vectors *vecs, vector nmpv)
135 {
136         struct multipath * ompp;
137         vector ompv = vecs->mpvec;
138         unsigned int i;
139         int j;
140
141         vector_foreach_slot (ompv, ompp, i) {
142                 condlog(3, "%s: coalesce map", ompp->alias);
143                 if (!find_mp_by_wwid(nmpv, ompp->wwid)) {
144                         /*
145                          * remove all current maps not allowed by the
146                          * current configuration
147                          */
148                         if (dm_flush_map(ompp->alias)) {
149                                 condlog(0, "%s: unable to flush devmap",
150                                         ompp->alias);
151                                 /*
152                                  * may be just because the device is open
153                                  */
154                                 if (!vector_alloc_slot(nmpv))
155                                         return 1;
156
157                                 vector_set_slot(nmpv, ompp);
158                                 setup_multipath(vecs, ompp);
159
160                                 if ((j = find_slot(ompv, (void *)ompp)) != -1)
161                                         vector_del_slot(ompv, j);
162
163                                 continue;
164                         }
165                         else {
166                                 dm_lib_release();
167                                 condlog(2, "%s devmap removed", ompp->alias);
168                         }
169                 } else if (conf->reassign_maps) {
170                         condlog(3, "%s: Reassign existing device-mapper"
171                                 " devices", ompp->alias);
172                         dm_reassign(ompp->alias);
173                 }
174         }
175         return 0;
176 }
177
178 void
179 sync_map_state(struct multipath *mpp)
180 {
181         struct pathgroup *pgp;
182         struct path *pp;
183         unsigned int i, j;
184
185         if (!mpp->pg)
186                 return;
187
188         vector_foreach_slot (mpp->pg, pgp, i){
189                 vector_foreach_slot (pgp->paths, pp, j){
190                         if (pp->state == PATH_UNCHECKED || 
191                             pp->state == PATH_WILD)
192                                 continue;
193                         if ((pp->dmstate == PSTATE_FAILED ||
194                              pp->dmstate == PSTATE_UNDEF) &&
195                             (pp->state == PATH_UP || pp->state == PATH_GHOST))
196                                 dm_reinstate_path(mpp->alias, pp->dev_t);
197                         else if ((pp->dmstate == PSTATE_ACTIVE ||
198                                   pp->dmstate == PSTATE_UNDEF) &&
199                                  (pp->state == PATH_DOWN ||
200                                   pp->state == PATH_SHAKY))
201                                 dm_fail_path(mpp->alias, pp->dev_t);
202                 }
203         }
204 }
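
/*
 * The reconciliation rules applied above, in short:
 *
 *   checker PATH_UP/PATH_GHOST   + dm PSTATE_FAILED/PSTATE_UNDEF
 *                                              -> dm_reinstate_path()
 *   checker PATH_DOWN/PATH_SHAKY + dm PSTATE_ACTIVE/PSTATE_UNDEF
 *                                              -> dm_fail_path()
 *
 * PATH_UNCHECKED and PATH_WILD paths are skipped entirely.
 */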
205
206 static void
207 sync_maps_state(vector mpvec)
208 {
209         unsigned int i;
210         struct multipath *mpp;
211
212         vector_foreach_slot (mpvec, mpp, i)
213                 sync_map_state(mpp);
214 }
215
216 static int
217 flush_map(struct multipath * mpp, struct vectors * vecs)
218 {
219         /*
220          * clear references to this map before flushing so we can ignore
221          * the spurious uevent we may generate with the dm_flush_map call below
222          */
223         if (dm_flush_map(mpp->alias)) {
224                 /*
225                  * May not really be an error -- if the map was already flushed
226                  * from the device mapper by dmsetup(8) for instance.
227                  */
228                 condlog(0, "%s: can't flush", mpp->alias);
229                 return 1;
230         }
231         else {
232                 dm_lib_release();
233                 condlog(2, "%s: map flushed", mpp->alias);
234         }
235
236         orphan_paths(vecs->pathvec, mpp);
237         remove_map_and_stop_waiter(mpp, vecs, 1);
238
239         return 0;
240 }
241
242 static int
243 uev_add_map (struct uevent * uev, struct vectors * vecs)
244 {
245         char *alias;
246         int major = -1, minor = -1, rc;
247
248         condlog(3, "%s: add map (uevent)", uev->kernel);
249         alias = uevent_get_dm_name(uev);
250         if (!alias) {
251                 condlog(3, "%s: No DM_NAME in uevent", uev->kernel);
252                 major = uevent_get_major(uev);
253                 minor = uevent_get_minor(uev);
254                 alias = dm_mapname(major, minor);
255                 if (!alias) {
256                         condlog(2, "%s: mapname not found for %d:%d",
257                                 uev->kernel, major, minor);
258                         return 1;
259                 }
260         }
261         rc = ev_add_map(uev->kernel, alias, vecs);
262         FREE(alias);
263         return rc;
264 }
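
/*
 * Example with hypothetical values: a "change" uevent for dm-2 that
 * carries no DM_NAME but reports major/minor 253:2 is resolved through
 * dm_mapname(253, 2); if that returns, say, "mpatha", ev_add_map() is
 * then called with dev="dm-2" and alias="mpatha".
 */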
265
266 int
267 ev_add_map (char * dev, char * alias, struct vectors * vecs)
268 {
269         char * refwwid;
270         struct multipath * mpp;
271         int map_present;
272         int r = 1;
273
274         map_present = dm_map_present(alias);
275
276         if (map_present && dm_type(alias, TGT_MPATH) <= 0) {
277                 condlog(4, "%s: not a multipath map", alias);
278                 return 0;
279         }
280
281         mpp = find_mp_by_alias(vecs->mpvec, alias);
282
283         if (mpp) {
284                 /*
285                  * Not really an error -- we generate our own uevent
286                  * if we create a multipath mapped device as a result
287                  * of uev_add_path
288                  */
289                 if (conf->reassign_maps) {
290                         condlog(3, "%s: Reassign existing device-mapper devices",
291                                 alias);
292                         dm_reassign(alias);
293                 }
294                 return 0;
295         }
296         condlog(2, "%s: adding map", alias);
297
298         /*
299          * now we can register the map
300          */
301         if (map_present && (mpp = add_map_without_path(vecs, alias))) {
302                 sync_map_state(mpp);
303                 condlog(2, "%s: devmap %s registered", alias, dev);
304                 return 0;
305         }
306         r = get_refwwid(dev, DEV_DEVMAP, vecs->pathvec, &refwwid);
307
308         if (refwwid) {
309                 r = coalesce_paths(vecs, NULL, refwwid, 0);
310                 dm_lib_release();
311         }
312
313         if (!r)
314                 condlog(2, "%s: devmap %s added", alias, dev);
315         else if (r == 2)
316                 condlog(2, "%s: uev_add_map %s blacklisted", alias, dev);
317         else
318                 condlog(0, "%s: uev_add_map %s failed", alias, dev);
319
320         FREE(refwwid);
321         return r;
322 }
323
324 static int
325 uev_remove_map (struct uevent * uev, struct vectors * vecs)
326 {
327         char *alias;
328         int minor;
329         struct multipath *mpp;
330
331         condlog(2, "%s: remove map (uevent)", uev->kernel);
332         alias = uevent_get_dm_name(uev);
333         if (!alias) {
334                 condlog(3, "%s: No DM_NAME in uevent, ignoring", uev->kernel);
335                 return 0;
336         }
337         minor = uevent_get_minor(uev);
338         mpp = find_mp_by_minor(vecs->mpvec, minor);
339
340         if (!mpp) {
341                 condlog(2, "%s: devmap not registered, can't remove",
342                         uev->kernel);
343                 goto out;
344         }
345         if (strcmp(mpp->alias, alias)) {
346                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
347                         mpp->alias, mpp->dmi->minor, minor);
348                 goto out;
349         }
350
351         orphan_paths(vecs->pathvec, mpp);
352         remove_map_and_stop_waiter(mpp, vecs, 1);
353 out:
354         FREE(alias);
355         return 0;
356 }
357
358 int
359 ev_remove_map (char * devname, char * alias, int minor, struct vectors * vecs)
360 {
361         struct multipath * mpp;
362
363         mpp = find_mp_by_minor(vecs->mpvec, minor);
364
365         if (!mpp) {
366                 condlog(2, "%s: devmap not registered, can't remove",
367                         devname);
368                 return 0;
369         }
370         if (strcmp(mpp->alias, alias)) {
371                 condlog(2, "%s: minor number mismatch (map %d, event %d)",
372                         mpp->alias, mpp->dmi->minor, minor);
373                 return 0;
374         }
375         return flush_map(mpp, vecs);
376 }
377
378 static int
379 uev_add_path (struct uevent *uev, struct vectors * vecs)
380 {
381         struct path *pp;
382         int ret, i;
383
384         condlog(2, "%s: add path (uevent)", uev->kernel);
385         if (strstr(uev->kernel, "..") != NULL) {
386                 /*
387                  * Don't allow relative device names in the pathvec
388                  */
389                 condlog(0, "%s: path name is invalid", uev->kernel);
390                 return 1;
391         }
392
393         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
394         if (pp) {
395                 condlog(0, "%s: spurious uevent, path already in pathvec",
396                         uev->kernel);
397                 if (pp->mpp)
398                         return 0;
399                 if (!strlen(pp->wwid)) {
400                         udev_device_unref(pp->udev);
401                         pp->udev = udev_device_ref(uev->udev);
402                         ret = pathinfo(pp, conf->hwtable,
403                                        DI_ALL | DI_BLACKLIST);
404                         if (ret == 2) {
405                                 i = find_slot(vecs->pathvec, (void *)pp);
406                                 if (i != -1)
407                                         vector_del_slot(vecs->pathvec, i);
408                                 free_path(pp);
409                                 return 0;
410                         } else if (ret == 1) {
411                                 condlog(0, "%s: failed to reinitialize path",
412                                         uev->kernel);
413                                 return 1;
414                         }
415                 }
416         } else {
417                 /*
418                  * get path vital state
419                  */
420                 ret = store_pathinfo(vecs->pathvec, conf->hwtable,
421                                      uev->udev, DI_ALL, &pp);
422                 if (!pp) {
423                         if (ret == 2)
424                                 return 0;
425                         condlog(0, "%s: failed to store path info",
426                                 uev->kernel);
427                         return 1;
428                 }
429                 pp->checkint = conf->checkint;
430         }
431
432         return ev_add_path(pp, vecs);
433 }
434
435 /*
436  * returns:
437  * 0: added
438  * 1: error
439  */
440 int
441 ev_add_path (struct path * pp, struct vectors * vecs)
442 {
443         struct multipath * mpp;
444         char empty_buff[WWID_SIZE] = {0};
445         char params[PARAMS_SIZE] = {0};
446         int retries = 3;
447         int start_waiter = 0;
448
449         /*
450          * need path UID to go any further
451          */
452         if (memcmp(empty_buff, pp->wwid, WWID_SIZE) == 0) {
453                 condlog(0, "%s: failed to get path uid", pp->dev);
454                 goto fail; /* leave path added to pathvec */
455         }
456         mpp = pp->mpp = find_mp_by_wwid(vecs->mpvec, pp->wwid);
457 rescan:
458         if (mpp) {
459                 if ((!pp->size) || (mpp->size != pp->size)) {
460                         if (!pp->size)
461                                 condlog(0, "%s: failed to add new path %s, "
462                                         "device size is 0",
463                                         mpp->alias, pp->dev);
464                         else
465                                 condlog(0, "%s: failed to add new path %s, "
466                                         "device size mismatch",
467                                         mpp->alias, pp->dev);
468                         int i = find_slot(vecs->pathvec, (void *)pp);
469                         if (i != -1)
470                                 vector_del_slot(vecs->pathvec, i);
471                         free_path(pp);
472                         return 1;
473                 }
474
475                 condlog(4,"%s: adopting all paths for path %s",
476                         mpp->alias, pp->dev);
477                 if (adopt_paths(vecs->pathvec, mpp, 1))
478                         goto fail; /* leave path added to pathvec */
479
480                 verify_paths(mpp, vecs, NULL);
481                 mpp->flush_on_last_del = FLUSH_UNDEF;
482                 mpp->action = ACT_RELOAD;
483         }
484         else {
485                 if (!pp->size) {
486                         condlog(0, "%s: failed to create new map,"
487                                 " device size is 0 ", pp->dev);
488                         int i = find_slot(vecs->pathvec, (void *)pp);
489                         if (i != -1)
490                                 vector_del_slot(vecs->pathvec, i);
491                         free_path(pp);
492                         return 1;
493                 }
494
495                 condlog(4,"%s: creating new map", pp->dev);
496                 if ((mpp = add_map_with_path(vecs, pp, 1))) {
497                         mpp->action = ACT_CREATE;
498                         /*
499                          * We don't depend on ACT_CREATE, as domap will
500                          * set it to ACT_NOTHING when complete.
501                          */
502                         start_waiter = 1;
503                 }
504                 else
505                         goto fail; /* leave path added to pathvec */
506         }
507
508         /* persistent reservation check */
509         mpath_pr_event_handle(pp);
510
511         /*
512          * push the map to the device-mapper
513          */
514         if (setup_map(mpp, params, PARAMS_SIZE)) {
515                 condlog(0, "%s: failed to setup map for addition of new "
516                         "path %s", mpp->alias, pp->dev);
517                 goto fail_map;
518         }
519         /*
520          * reload the map for the multipath mapped device
521          */
522         if (domap(mpp, params) <= 0) {
523                 condlog(0, "%s: failed in domap for addition of new "
524                         "path %s", mpp->alias, pp->dev);
525                 /*
526                  * deal with asynchronous uevents :((
527                  */
528                 if (mpp->action == ACT_RELOAD && retries-- > 0) {
529                         condlog(0, "%s: uev_add_path sleep", mpp->alias);
530                         sleep(1);
531                         update_mpp_paths(mpp, vecs->pathvec);
532                         goto rescan;
533                 }
534                 else if (mpp->action == ACT_RELOAD)
535                         condlog(0, "%s: giving up reload", mpp->alias);
536                 else
537                         goto fail_map;
538         }
539         dm_lib_release();
540
541         /*
542          * update our state from kernel regardless of create or reload
543          */
544         if (setup_multipath(vecs, mpp))
545                 goto fail; /* if setup_multipath fails, it removes the map */
546
547         sync_map_state(mpp);
548
549         if ((mpp->action == ACT_CREATE ||
550              (mpp->action == ACT_NOTHING && start_waiter && !mpp->waiter)) &&
551             start_waiter_thread(mpp, vecs))
552                         goto fail_map;
553
554         if (retries >= 0) {
555                 condlog(2, "%s [%s]: path added to devmap %s",
556                         pp->dev, pp->dev_t, mpp->alias);
557                 return 0;
558         }
559         else
560                 return 1;
561
562 fail_map:
563         remove_map(mpp, vecs, 1);
564 fail:
565         orphan_path(pp, "failed to add path");
566         return 1;
567 }
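
/*
 * Retry behaviour sketch: if domap() fails for an ACT_RELOAD (typically
 * because a competing asynchronous uevent touched the table first), the
 * path list is refreshed with update_mpp_paths() and the sequence is
 * retried from "rescan:" up to 3 times, sleeping one second between
 * attempts, before the reload is given up.
 */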
568
569 static int
570 uev_remove_path (struct uevent *uev, struct vectors * vecs)
571 {
572         struct path *pp;
573
574         condlog(2, "%s: remove path (uevent)", uev->kernel);
575         pp = find_path_by_dev(vecs->pathvec, uev->kernel);
576
577         if (!pp) {
578                 /* Not an error; path might have been purged earlier */
579                 condlog(0, "%s: path already removed", uev->kernel);
580                 return 0;
581         }
582
583         return ev_remove_path(pp, vecs);
584 }
585
586 int
587 ev_remove_path (struct path *pp, struct vectors * vecs)
588 {
589         struct multipath * mpp;
590         int i, retval = 0;
591         char params[PARAMS_SIZE] = {0};
592
593         /*
594          * avoid referring to the map of an orphaned path
595          */
596         if ((mpp = pp->mpp)) {
597                 /*
598                  * transform the mp->pg vector of vectors of paths
599                  * into a mp->params string to feed the device-mapper
600                  */
601                 if (update_mpp_paths(mpp, vecs->pathvec)) {
602                         condlog(0, "%s: failed to update paths",
603                                 mpp->alias);
604                         goto fail;
605                 }
606                 if ((i = find_slot(mpp->paths, (void *)pp)) != -1)
607                         vector_del_slot(mpp->paths, i);
608
609                 /*
610                  * remove the map IFF removing the last path
611                  */
612                 if (VECTOR_SIZE(mpp->paths) == 0) {
613                         char alias[WWID_SIZE];
614
615                         /*
616                          * flush_map will fail if the device is open
617                          */
618                         strncpy(alias, mpp->alias, WWID_SIZE);
619                         if (mpp->flush_on_last_del == FLUSH_ENABLED) {
620                                 condlog(2, "%s Last path deleted, disabling queueing", mpp->alias);
621                                 mpp->retry_tick = 0;
622                                 mpp->no_path_retry = NO_PATH_RETRY_FAIL;
623                                 mpp->flush_on_last_del = FLUSH_IN_PROGRESS;
624                                 dm_queue_if_no_path(mpp->alias, 0);
625                         }
626                         if (!flush_map(mpp, vecs)) {
627                                 condlog(2, "%s: removed map after"
628                                         " removing all paths",
629                                         alias);
630                                 retval = 0;
631                                 goto out;
632                         }
633                         /*
634                          * Not an error, continue
635                          */
636                 }
637
638                 if (setup_map(mpp, params, PARAMS_SIZE)) {
639                         condlog(0, "%s: failed to setup map for"
640                                 " removal of path %s", mpp->alias, pp->dev);
641                         goto fail;
642                 }
643                 /*
644                  * reload the map
645                  */
646                 mpp->action = ACT_RELOAD;
647                 if (domap(mpp, params) <= 0) {
648                         condlog(0, "%s: failed in domap for "
649                                 "removal of path %s",
650                                 mpp->alias, pp->dev);
651                         retval = 1;
652                 } else {
653                         /*
654                          * update our state from kernel
655                          */
656                         if (setup_multipath(vecs, mpp)) {
657                                 goto fail;
658                         }
659                         sync_map_state(mpp);
660
661                         condlog(2, "%s [%s]: path removed from map %s",
662                                 pp->dev, pp->dev_t, mpp->alias);
663                 }
664         }
665
666 out:
667         if ((i = find_slot(vecs->pathvec, (void *)pp)) != -1)
668                 vector_del_slot(vecs->pathvec, i);
669
670         free_path(pp);
671
672         return retval;
673
674 fail:
675         remove_map_and_stop_waiter(mpp, vecs, 1);
676         return 1;
677 }
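
/*
 * Last-path example: when the removed path was the only one left and
 * flush_on_last_del is FLUSH_ENABLED, queueing is disabled first via
 * dm_queue_if_no_path(alias, 0) so queued I/O cannot block flush_map();
 * otherwise the map is simply reloaded without the removed path.
 */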
678
679 static int
680 uev_update_path (struct uevent *uev, struct vectors * vecs)
681 {
682         int ro, retval = 0;
683
684         ro = uevent_get_disk_ro(uev);
685
686         if (ro >= 0) {
687                 struct path * pp;
688
689                 condlog(2, "%s: update path write_protect to '%d' (uevent)",
690                         uev->kernel, ro);
691                 pp = find_path_by_dev(vecs->pathvec, uev->kernel);
692                 if (!pp) {
693                         condlog(0, "%s: spurious uevent, path not found",
694                                 uev->kernel);
695                         return 1;
696                 }
697                 if (pp->mpp) {
698                         retval = reload_map(vecs, pp->mpp, 0);
699
700                         condlog(2, "%s: map %s reloaded (retval %d)",
701                                 uev->kernel, pp->mpp->alias, retval);
702                 }
703
704         }
705
706         return retval;
707 }
708
709 static int
710 map_discovery (struct vectors * vecs)
711 {
712         struct multipath * mpp;
713         unsigned int i;
714
715         if (dm_get_maps(vecs->mpvec))
716                 return 1;
717
718         vector_foreach_slot (vecs->mpvec, mpp, i)
719                 if (setup_multipath(vecs, mpp))
720                         return 1;
721
722         return 0;
723 }
724
725 int
726 uxsock_trigger (char * str, char ** reply, int * len, void * trigger_data)
727 {
728         struct vectors * vecs;
729         int r;
730
731         *reply = NULL;
732         *len = 0;
733         vecs = (struct vectors *)trigger_data;
734
735         pthread_cleanup_push(cleanup_lock, &vecs->lock);
736         lock(vecs->lock);
737         pthread_testcancel();
738
739         r = parse_cmd(str, reply, len, vecs);
740
741         if (r > 0) {
742                 *reply = STRDUP("fail\n");
743                 *len = strlen(*reply) + 1;
744                 r = 1;
745         }
746         else if (!r && *len == 0) {
747                 *reply = STRDUP("ok\n");
748                 *len = strlen(*reply) + 1;
749                 r = 0;
750         }
751         /* else if (r < 0) leave *reply alone */
752
753         lock_cleanup_pop(vecs->lock);
754         return r;
755 }
756
757 static int
758 uev_discard(char * devpath)
759 {
760         char *tmp;
761         char a[11], b[11];
762
763         /*
764          * keep only block devices, discard partitions
765          */
766         tmp = strstr(devpath, "/block/");
767         if (tmp == NULL){
768                 condlog(4, "no /block/ in '%s'", devpath);
769                 return 1;
770         }
771         if (sscanf(tmp, "/block/%10s", a) != 1 ||
772             sscanf(tmp, "/block/%10[^/]/%10s", a, b) == 2) {
773                 condlog(4, "discard event on %s", devpath);
774                 return 1;
775         }
776         return 0;
777 }
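
/*
 * For instance, a devpath ending in ".../block/sdb" is kept (only one
 * component follows "/block/"), while ".../block/sdb/sdb1" matches the
 * second sscanf() pattern and is discarded as a partition event.
 */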
778
779 int
780 uev_trigger (struct uevent * uev, void * trigger_data)
781 {
782         int r = 0;
783         struct vectors * vecs;
784
785         vecs = (struct vectors *)trigger_data;
786
787         if (uev_discard(uev->devpath))
788                 return 0;
789
790         pthread_cleanup_push(cleanup_lock, &vecs->lock);
791         lock(vecs->lock);
792         pthread_testcancel();
793
794         /*
795          * device map event
796          * Add events are ignored here as the tables
797          * are not fully initialised then.
798          */
799         if (!strncmp(uev->kernel, "dm-", 3)) {
800                 if (!strncmp(uev->action, "change", 6)) {
801                         r = uev_add_map(uev, vecs);
802                         goto out;
803                 }
804                 if (!strncmp(uev->action, "remove", 6)) {
805                         r = uev_remove_map(uev, vecs);
806                         goto out;
807                 }
808                 goto out;
809         }
810
811         /*
812          * path add/remove event
813          */
814         if (filter_devnode(conf->blist_devnode, conf->elist_devnode,
815                            uev->kernel) > 0)
816                 goto out;
817
818         if (!strncmp(uev->action, "add", 3)) {
819                 r = uev_add_path(uev, vecs);
820                 goto out;
821         }
822         if (!strncmp(uev->action, "remove", 6)) {
823                 r = uev_remove_path(uev, vecs);
824                 goto out;
825         }
826         if (!strncmp(uev->action, "change", 6)) {
827                 r = uev_update_path(uev, vecs);
828                 goto out;
829         }
830
831 out:
832         lock_cleanup_pop(vecs->lock);
833         return r;
834 }
835
836 static void *
837 ueventloop (void * ap)
838 {
839         if (uevent_listen(udev))
840                 condlog(0, "error starting uevent listener");
841
842         return NULL;
843 }
844
845 static void *
846 uevqloop (void * ap)
847 {
848         if (uevent_dispatch(&uev_trigger, ap))
849                 condlog(0, "error starting uevent dispatcher");
850
851         return NULL;
852 }
853 static void *
854 uxlsnrloop (void * ap)
855 {
856         if (cli_init())
857                 return NULL;
858
859         set_handler_callback(LIST+PATHS, cli_list_paths);
860         set_handler_callback(LIST+PATHS+FMT, cli_list_paths_fmt);
861         set_handler_callback(LIST+MAPS, cli_list_maps);
862         set_handler_callback(LIST+STATUS, cli_list_status);
863         set_handler_callback(LIST+DAEMON, cli_list_daemon);
864         set_handler_callback(LIST+MAPS+STATUS, cli_list_maps_status);
865         set_handler_callback(LIST+MAPS+STATS, cli_list_maps_stats);
866         set_handler_callback(LIST+MAPS+FMT, cli_list_maps_fmt);
867         set_handler_callback(LIST+MAPS+TOPOLOGY, cli_list_maps_topology);
868         set_handler_callback(LIST+TOPOLOGY, cli_list_maps_topology);
869         set_handler_callback(LIST+MAP+TOPOLOGY, cli_list_map_topology);
870         set_handler_callback(LIST+CONFIG, cli_list_config);
871         set_handler_callback(LIST+BLACKLIST, cli_list_blacklist);
872         set_handler_callback(LIST+DEVICES, cli_list_devices);
873         set_handler_callback(LIST+WILDCARDS, cli_list_wildcards);
874         set_handler_callback(ADD+PATH, cli_add_path);
875         set_handler_callback(DEL+PATH, cli_del_path);
876         set_handler_callback(ADD+MAP, cli_add_map);
877         set_handler_callback(DEL+MAP, cli_del_map);
878         set_handler_callback(SWITCH+MAP+GROUP, cli_switch_group);
879         set_handler_callback(RECONFIGURE, cli_reconfigure);
880         set_handler_callback(SUSPEND+MAP, cli_suspend);
881         set_handler_callback(RESUME+MAP, cli_resume);
882         set_handler_callback(RESIZE+MAP, cli_resize);
883         set_handler_callback(RELOAD+MAP, cli_reload);
884         set_handler_callback(RESET+MAP, cli_reassign);
885         set_handler_callback(REINSTATE+PATH, cli_reinstate);
886         set_handler_callback(FAIL+PATH, cli_fail);
887         set_handler_callback(DISABLEQ+MAP, cli_disable_queueing);
888         set_handler_callback(RESTOREQ+MAP, cli_restore_queueing);
889         set_handler_callback(DISABLEQ+MAPS, cli_disable_all_queueing);
890         set_handler_callback(RESTOREQ+MAPS, cli_restore_all_queueing);
891         set_handler_callback(QUIT, cli_quit);
892         set_handler_callback(SHUTDOWN, cli_shutdown);
893         set_handler_callback(GETPRSTATUS+MAP, cli_getprstatus);
894         set_handler_callback(SETPRSTATUS+MAP, cli_setprstatus);
895         set_handler_callback(UNSETPRSTATUS+MAP, cli_unsetprstatus);
896         set_handler_callback(FORCEQ+DAEMON, cli_force_no_daemon_q);
897         set_handler_callback(RESTOREQ+DAEMON, cli_restore_no_daemon_q);
898
899         umask(077);
900         uxsock_listen(&uxsock_trigger, ap);
901
902         return NULL;
903 }
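
/*
 * These handlers back the interactive CLI; a typical session (assuming
 * the usual client invocation) looks like:
 *
 *     # multipathd -k
 *     multipathd> list paths
 *     multipathd> reconfigure
 *
 * Each command string reaches parse_cmd() through uxsock_trigger()
 * above, executed under the vecs lock.
 */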
904
905 void
906 exit_daemon (void)
907 {
908         sem_post(&exit_sem);
909 }
910
911 const char *
912 daemon_status(void)
913 {
914         switch (running_state) {
915         case DAEMON_INIT:
916                 return "init";
917         case DAEMON_START:
918                 return "startup";
919         case DAEMON_CONFIGURE:
920                 return "configure";
921         case DAEMON_RUNNING:
922                 return "running";
923         case DAEMON_SHUTDOWN:
924                 return "shutdown";
925         }
926         return NULL;
927 }
928
929 static void
930 fail_path (struct path * pp, int del_active)
931 {
932         if (!pp->mpp)
933                 return;
934
935         condlog(2, "checker failed path %s in map %s",
936                  pp->dev_t, pp->mpp->alias);
937
938         dm_fail_path(pp->mpp->alias, pp->dev_t);
939         if (del_active)
940                 update_queue_mode_del_path(pp->mpp);
941 }
942
943 /*
944  * caller must have locked the path list before calling this function
945  */
946 static void
947 reinstate_path (struct path * pp, int add_active)
948 {
949         if (!pp->mpp)
950                 return;
951
952         if (dm_reinstate_path(pp->mpp->alias, pp->dev_t))
953                 condlog(0, "%s: reinstate failed", pp->dev_t);
954         else {
955                 condlog(2, "%s: reinstated", pp->dev_t);
956                 if (add_active)
957                         update_queue_mode_add_path(pp->mpp);
958         }
959 }
960
961 static void
962 enable_group(struct path * pp)
963 {
964         struct pathgroup * pgp;
965
966         /*
967          * if path is added through uev_add_path, pgindex can be unset.
968          * next update_strings() will set it, upon map reload event.
969          *
970          * we can safely return here, because upon map reload, all
971          * PG will be enabled.
972          */
973         if (!pp->mpp->pg || !pp->pgindex)
974                 return;
975
976         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
977
978         if (pgp->status == PGSTATE_DISABLED) {
979                 condlog(2, "%s: enable group #%i", pp->mpp->alias, pp->pgindex);
980                 dm_enablegroup(pp->mpp->alias, pp->pgindex);
981         }
982 }
983
984 static void
985 mpvec_garbage_collector (struct vectors * vecs)
986 {
987         struct multipath * mpp;
988         unsigned int i;
989
990         if (!vecs->mpvec)
991                 return;
992
993         vector_foreach_slot (vecs->mpvec, mpp, i) {
994                 if (mpp && mpp->alias && !dm_map_present(mpp->alias)) {
995                         condlog(2, "%s: remove dead map", mpp->alias);
996                         remove_map_and_stop_waiter(mpp, vecs, 1);
997                         i--;
998                 }
999         }
1000 }
1001
1002 /* This is called after a path has started working again. If the multipath
1003  * device for this path uses the followover failback type, and this is the
1004  * best pathgroup, and this is the first path in the pathgroup to come back
1005  * up, then switch to this pathgroup */
1006 static int
1007 followover_should_failback(struct path * pp)
1008 {
1009         struct pathgroup * pgp;
1010         struct path *pp1;
1011         int i;
1012
1013         if (pp->mpp->pgfailback != -FAILBACK_FOLLOWOVER ||
1014             !pp->mpp->pg || !pp->pgindex ||
1015             pp->pgindex != pp->mpp->bestpg)
1016                 return 0;
1017
1018         pgp = VECTOR_SLOT(pp->mpp->pg, pp->pgindex - 1);
1019         vector_foreach_slot(pgp->paths, pp1, i) {
1020                 if (pp1 == pp)
1021                         continue;
1022                 if (pp1->chkrstate != PATH_DOWN && pp1->chkrstate != PATH_SHAKY)
1023                         return 0;
1024         }
1025         return 1;
1026 }
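
/*
 * Example: with "failback followover" and two path groups, a path that
 * recovers in the best group triggers failback only if every other path
 * in that group is still PATH_DOWN or PATH_SHAKY; if another path there
 * was already usable, the current group selection is presumed
 * intentional and no automatic switch happens.
 */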
1027
1028 static void
1029 defered_failback_tick (vector mpvec)
1030 {
1031         struct multipath * mpp;
1032         unsigned int i;
1033
1034         vector_foreach_slot (mpvec, mpp, i) {
1035                 /*
1036                  * deferred failback counting down
1037                  */
1038                 if (mpp->pgfailback > 0 && mpp->failback_tick > 0) {
1039                         mpp->failback_tick--;
1040
1041                         if (!mpp->failback_tick && need_switch_pathgroup(mpp, 1))
1042                                 switch_pathgroup(mpp);
1043                 }
1044         }
1045 }
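
/*
 * Example: with "failback 10", check_path() below arms
 * failback_tick = pgfailback + 1 = 11; this function is called roughly
 * once per second from checkerloop(), so the group switch happens about
 * 10 seconds after the path event, provided a better group is still
 * available when the countdown reaches zero.
 */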
1046
1047 static void
1048 retry_count_tick(vector mpvec)
1049 {
1050         struct multipath *mpp;
1051         unsigned int i;
1052
1053         vector_foreach_slot (mpvec, mpp, i) {
1054                 if (mpp->retry_tick) {
1055                         mpp->stat_total_queueing_time++;
1056                         condlog(4, "%s: Retrying.. No active path", mpp->alias);
1057                         if(--mpp->retry_tick == 0) {
1058                                 dm_queue_if_no_path(mpp->alias, 0);
1059                                 condlog(2, "%s: Disable queueing", mpp->alias);
1060                         }
1061                 }
1062         }
1063 }
1064
1065 int update_prio(struct path *pp, int refresh_all)
1066 {
1067         int oldpriority;
1068         struct path *pp1;
1069         struct pathgroup * pgp;
1070         int i, j, changed = 0;
1071
1072         if (refresh_all) {
1073                 vector_foreach_slot (pp->mpp->pg, pgp, i) {
1074                         vector_foreach_slot (pgp->paths, pp1, j) {
1075                                 oldpriority = pp1->priority;
1076                                 pathinfo(pp1, conf->hwtable, DI_PRIO);
1077                                 if (pp1->priority != oldpriority)
1078                                         changed = 1;
1079                         }
1080                 }
1081                 return changed;
1082         }
1083         oldpriority = pp->priority;
1084         pathinfo(pp, conf->hwtable, DI_PRIO);
1085
1086         if (pp->priority == oldpriority)
1087                 return 0;
1088         return 1;
1089 }
1090
1091 int update_path_groups(struct multipath *mpp, struct vectors *vecs, int refresh)
1092 {
1093         if (reload_map(vecs, mpp, refresh))
1094                 return 1;
1095
1096         dm_lib_release();
1097         if (setup_multipath(vecs, mpp) != 0)
1098                 return 1;
1099         sync_map_state(mpp);
1100
1101         return 0;
1102 }
1103
1104 void
1105 check_path (struct vectors * vecs, struct path * pp)
1106 {
1107         int newstate;
1108         int new_path_up = 0;
1109         int chkr_new_path_up = 0;
1110         int oldchkrstate = pp->chkrstate;
1111
1112         if (!pp->mpp)
1113                 return;
1114
1115         if (pp->tick && --pp->tick)
1116                 return; /* don't check this path yet */
1117
1118         /*
1119          * provision the next check soonest,
1120          * in case we exit abnormally from here
1121          */
1122         pp->tick = conf->checkint;
1123
1124         newstate = path_offline(pp);
1125         if (newstate == PATH_UP)
1126                 newstate = get_state(pp, 1);
1127
1128         if (newstate == PATH_WILD || newstate == PATH_UNCHECKED) {
1129                 condlog(2, "%s: unusable path", pp->dev);
1130                 pathinfo(pp, conf->hwtable, 0);
1131                 return;
1132         }
1133         /*
1134          * Async IO in flight. Keep the previous path state
1135          * and reschedule as soon as possible
1136          */
1137         if (newstate == PATH_PENDING) {
1138                 pp->tick = 1;
1139                 return;
1140         }
1141         /*
1142          * Synchronize with kernel state
1143          */
1144         if (update_multipath_strings(pp->mpp, vecs->pathvec)) {
1145                 condlog(1, "%s: Could not synchronize with kernel state",
1146                         pp->dev);
1147                 pp->dmstate = PSTATE_UNDEF;
1148         }
1149         pp->chkrstate = newstate;
1150         if (newstate != pp->state) {
1151                 int oldstate = pp->state;
1152                 pp->state = newstate;
1153                 LOG_MSG(1, checker_message(&pp->checker));
1154
1155                 /*
1156                  * upon state change, reset the checkint
1157                  * to the shortest delay
1158                  */
1159                 pp->checkint = conf->checkint;
1160
1161                 if (newstate == PATH_DOWN || newstate == PATH_SHAKY) {
1162                         /*
1163                          * proactively fail path in the DM
1164                          */
1165                         if (oldstate == PATH_UP ||
1166                             oldstate == PATH_GHOST)
1167                                 fail_path(pp, 1);
1168                         else
1169                                 fail_path(pp, 0);
1170
1171                         /*
1172                          * cancel scheduled failback
1173                          */
1174                         pp->mpp->failback_tick = 0;
1175
1176                         pp->mpp->stat_path_failures++;
1177                         return;
1178                 }
1179
1180                 if (newstate == PATH_UP || newstate == PATH_GHOST) {
1181                         if (pp->mpp && pp->mpp->prflag) {
1182                                 /*
1183                                  * Check Persistent Reservation.
1184                                  */
1185                                 condlog(2, "%s: checking persistent reservation "
1186                                         "registration", pp->dev);
1187                                 mpath_pr_event_handle(pp);
1188                         }
1189                 }
1190
1191                 /*
1192                  * reinstate this path
1193                  */
1194                 if (oldstate != PATH_UP &&
1195                     oldstate != PATH_GHOST)
1196                         reinstate_path(pp, 1);
1197                 else
1198                         reinstate_path(pp, 0);
1199
1200                 new_path_up = 1;
1201
1202                 if (oldchkrstate != PATH_UP && oldchkrstate != PATH_GHOST)
1203                         chkr_new_path_up = 1;
1204
1205                 /*
1206                  * if at least one path is up in a group, and
1207                  * the group is disabled, re-enable it
1208                  */
1209                 if (newstate == PATH_UP)
1210                         enable_group(pp);
1211         }
1212         else if (newstate == PATH_UP || newstate == PATH_GHOST) {
1213                 if (pp->dmstate == PSTATE_FAILED ||
1214                     pp->dmstate == PSTATE_UNDEF) {
1215                         /* Clear IO errors */
1216                         reinstate_path(pp, 0);
1217                 } else {
1218                         LOG_MSG(4, checker_message(&pp->checker));
1219                         if (pp->checkint != conf->max_checkint) {
1220                                 /*
1221                                  * double the next check delay.
1222                                  * max at conf->max_checkint
1223                                  */
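                                /*
                                 * e.g. with checkint 5 and max_checkint 20
                                 * the delay grows 5 -> 10 -> 20 and then
                                 * stays at 20.
                                 */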
1224                                 if (pp->checkint < (conf->max_checkint / 2))
1225                                         pp->checkint = 2 * pp->checkint;
1226                                 else
1227                                         pp->checkint = conf->max_checkint;
1228
1229                                 condlog(4, "%s: delay next check %is",
1230                                         pp->dev_t, pp->checkint);
1231                         }
1232                         pp->tick = pp->checkint;
1233                 }
1234         }
1235         else if (newstate == PATH_DOWN) {
1236                 if (conf->log_checker_err == LOG_CHKR_ERR_ONCE)
1237                         LOG_MSG(3, checker_message(&pp->checker));
1238                 else
1239                         LOG_MSG(2, checker_message(&pp->checker));
1240         }
1241
1242         pp->state = newstate;
1243
1244         /*
1245          * path prio refreshing
1246          */
1247         condlog(4, "path prio refresh");
1248
1249         if (update_prio(pp, new_path_up) &&
1250             (pp->mpp->pgpolicyfn == (pgpolicyfn *)group_by_prio) &&
1251              pp->mpp->pgfailback == -FAILBACK_IMMEDIATE)
1252                 update_path_groups(pp->mpp, vecs, !new_path_up);
1253         else if (need_switch_pathgroup(pp->mpp, 0)) {
1254                 if (pp->mpp->pgfailback > 0 &&
1255                     (new_path_up || pp->mpp->failback_tick <= 0))
1256                         pp->mpp->failback_tick =
1257                                 pp->mpp->pgfailback + 1;
1258                 else if (pp->mpp->pgfailback == -FAILBACK_IMMEDIATE ||
1259                          (chkr_new_path_up && followover_should_failback(pp)))
1260                         switch_pathgroup(pp->mpp);
1261         }
1262 }
1263
1264 static void *
1265 checkerloop (void *ap)
1266 {
1267         struct vectors *vecs;
1268         struct path *pp;
1269         int count = 0;
1270         unsigned int i;
1271
1272         mlockall(MCL_CURRENT | MCL_FUTURE);
1273         vecs = (struct vectors *)ap;
1274         condlog(2, "path checkers start up");
1275
1276         /*
1277          * init the path check interval
1278          */
1279         vector_foreach_slot (vecs->pathvec, pp, i) {
1280                 pp->checkint = conf->checkint;
1281         }
1282
1283         while (1) {
1284                 pthread_cleanup_push(cleanup_lock, &vecs->lock);
1285                 lock(vecs->lock);
1286                 pthread_testcancel();
1287                 condlog(4, "tick");
1288
1289                 if (vecs->pathvec) {
1290                         vector_foreach_slot (vecs->pathvec, pp, i) {
1291                                 check_path(vecs, pp);
1292                         }
1293                 }
1294                 if (vecs->mpvec) {
1295                         defered_failback_tick(vecs->mpvec);
1296                         retry_count_tick(vecs->mpvec);
1297                 }
1298                 if (count)
1299                         count--;
1300                 else {
1301                         condlog(4, "map garbage collection");
1302                         mpvec_garbage_collector(vecs);
1303                         count = MAPGCINT;
1304                 }
1305
1306                 lock_cleanup_pop(vecs->lock);
1307                 sleep(1);
1308         }
1309         return NULL;
1310 }
1311
1312 int
1313 configure (struct vectors * vecs, int start_waiters)
1314 {
1315         struct multipath * mpp;
1316         struct path * pp;
1317         vector mpvec;
1318         int i;
1319
1320         if (!vecs->pathvec && !(vecs->pathvec = vector_alloc()))
1321                 return 1;
1322
1323         if (!vecs->mpvec && !(vecs->mpvec = vector_alloc()))
1324                 return 1;
1325
1326         if (!(mpvec = vector_alloc()))
1327                 return 1;
1328
1329         /*
1330          * probe for current path (from sysfs) and map (from dm) sets
1331          */
1332         path_discovery(vecs->pathvec, conf, DI_ALL);
1333
1334         vector_foreach_slot (vecs->pathvec, pp, i){
1335                 if (filter_path(conf, pp) > 0){
1336                         vector_del_slot(vecs->pathvec, i);
1337                         free_path(pp);
1338                         i--;
1339                 }
1340                 else
1341                         pp->checkint = conf->checkint;
1342         }
1343         if (map_discovery(vecs))
1344                 return 1;
1345
1346         /*
1347          * create new set of maps & push changed ones into dm
1348          */
1349         if (coalesce_paths(vecs, mpvec, NULL, 1))
1350                 return 1;
1351
1352         /*
1353          * may need to remove some maps which are no longer relevant
1354          * e.g., due to blacklist changes in conf file
1355          */
1356         if (coalesce_maps(vecs, mpvec))
1357                 return 1;
1358
1359         dm_lib_release();
1360
1361         sync_maps_state(mpvec);
1362         vector_foreach_slot(mpvec, mpp, i){
1363                 remember_wwid(mpp->wwid);
1364                 update_map_pr(mpp);
1365         }
1366
1367         /*
1368          * purge dm of old maps
1369          */
1370         remove_maps(vecs);
1371
1372         /*
1373          * save new set of maps formed by considering current path state
1374          */
1375         vector_free(vecs->mpvec);
1376         vecs->mpvec = mpvec;
1377
1378         /*
1379          * start dm event waiter threads for these new maps
1380          */
1381         vector_foreach_slot(vecs->mpvec, mpp, i) {
1382                 if (setup_multipath(vecs, mpp))
1383                         return 1;
1384                 if (start_waiters)
1385                         if (start_waiter_thread(mpp, vecs))
1386                                 return 1;
1387         }
1388         return 0;
1389 }
1390
1391 int
1392 reconfigure (struct vectors * vecs)
1393 {
1394         struct config * old = conf;
1395         int retval = 1;
1396
1397         /*
1398          * free old map and path vectors ... they use old conf state
1399          */
1400         if (VECTOR_SIZE(vecs->mpvec))
1401                 remove_maps_and_stop_waiters(vecs);
1402
1403         if (VECTOR_SIZE(vecs->pathvec))
1404                 free_pathvec(vecs->pathvec, FREE_PATHS);
1405
1406         vecs->pathvec = NULL;
1407         conf = NULL;
1408
1409         if (!load_config(DEFAULT_CONFIGFILE, udev)) {
1410                 conf->verbosity = old->verbosity;
1411                 conf->daemon = 1;
1412                 configure(vecs, 1);
1413                 free_config(old);
1414                 retval = 0;
1415         }
1416
1417         return retval;
1418 }
1419
1420 static struct vectors *
1421 init_vecs (void)
1422 {
1423         struct vectors * vecs;
1424
1425         vecs = (struct vectors *)MALLOC(sizeof(struct vectors));
1426
1427         if (!vecs)
1428                 return NULL;
1429
1430         vecs->lock.mutex =
1431                 (pthread_mutex_t *)MALLOC(sizeof(pthread_mutex_t));
1432
1433         if (!vecs->lock.mutex)
1434                 goto out;
1435
1436         pthread_mutex_init(vecs->lock.mutex, NULL);
1437         vecs->lock.depth = 0;
1438
1439         return vecs;
1440
1441 out:
1442         FREE(vecs);
1443         condlog(0, "failed to init paths");
1444         return NULL;
1445 }
1446
1447 static void *
1448 signal_set(int signo, void (*func) (int))
1449 {
1450         int r;
1451         struct sigaction sig;
1452         struct sigaction osig;
1453
1454         sig.sa_handler = func;
1455         sigemptyset(&sig.sa_mask);
1456         sig.sa_flags = 0;
1457
1458         r = sigaction(signo, &sig, &osig);
1459
1460         if (r < 0)
1461                 return (SIG_ERR);
1462         else
1463                 return (osig.sa_handler);
1464 }
1465
1466 void
1467 handle_signals(void)
1468 {
1469         if (reconfig_sig && running_state == DAEMON_RUNNING) {
1470                 condlog(2, "reconfigure (signal)");
1471                 pthread_cleanup_push(cleanup_lock,
1472                                 &gvecs->lock);
1473                 lock(gvecs->lock);
1474                 pthread_testcancel();
1475                 reconfigure(gvecs);
1476                 lock_cleanup_pop(gvecs->lock);
1477         }
1478         if (log_reset_sig) {
1479                 condlog(2, "reset log (signal)");
1480                 pthread_mutex_lock(&logq_lock);
1481                 log_reset("multipathd");
1482                 pthread_mutex_unlock(&logq_lock);
1483         }
1484         reconfig_sig = 0;
1485         log_reset_sig = 0;
1486 }
1487
1488 static void
1489 sighup (int sig)
1490 {
1491         reconfig_sig = 1;
1492 }
1493
1494 static void
1495 sigend (int sig)
1496 {
1497         exit_daemon();
1498 }
1499
1500 static void
1501 sigusr1 (int sig)
1502 {
1503         log_reset_sig = 1;
1504 }
1505
1506 static void
1507 signal_init(void)
1508 {
1509         sigset_t set;
1510
1511         sigemptyset(&set);
1512         sigaddset(&set, SIGHUP);
1513         sigaddset(&set, SIGUSR1);
1514         pthread_sigmask(SIG_BLOCK, &set, NULL);
1515
1516         signal_set(SIGHUP, sighup);
1517         signal_set(SIGUSR1, sigusr1);
1518         signal_set(SIGINT, sigend);
1519         signal_set(SIGTERM, sigend);
1520         signal(SIGPIPE, SIG_IGN);
1521 }
1522
1523 static void
1524 setscheduler (void)
1525 {
1526         int res;
1527         static struct sched_param sched_param = {
1528                 .sched_priority = 99
1529         };
1530
1531         res = sched_setscheduler (0, SCHED_RR, &sched_param);
1532
1533         if (res == -1)
1534                 condlog(LOG_WARNING, "Could not set SCHED_RR at priority 99");
1535         return;
1536 }
1537
1538 static void
1539 set_oom_adj (void)
1540 {
1541 #ifdef OOM_SCORE_ADJ_MIN
1542         int retry = 1;
1543         char *file = "/proc/self/oom_score_adj";
1544         int score = OOM_SCORE_ADJ_MIN;
1545 #else
1546         int retry = 0;
1547         char *file = "/proc/self/oom_adj";
1548         int score = OOM_ADJUST_MIN;
1549 #endif
1550         FILE *fp;
1551         struct stat st;
1552
1553         do {
1554                 if (stat(file, &st) == 0){
1555                         fp = fopen(file, "w");
1556                         if (!fp) {
1557                                 condlog(0, "couldn't fopen %s : %s", file,
1558                                         strerror(errno));
1559                                 return;
1560                         }
1561                         fprintf(fp, "%i", score);
1562                         fclose(fp);
1563                         return;
1564                 }
1565                 if (errno != ENOENT) {
1566                         condlog(0, "couldn't stat %s : %s", file,
1567                                 strerror(errno));
1568                         return;
1569                 }
1570 #ifdef OOM_ADJUST_MIN
1571                 file = "/proc/self/oom_adj";
1572                 score = OOM_ADJUST_MIN;
1573 #else
1574                 retry = 0;
1575 #endif
1576         } while (retry--);
1577         condlog(0, "couldn't adjust oom score");
1578 }
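
/*
 * On kernels that define OOM_SCORE_ADJ_MIN this writes the minimum score
 * (typically -1000) to /proc/self/oom_score_adj; older kernels fall back
 * to writing OOM_ADJUST_MIN (typically -17) to /proc/self/oom_adj, making
 * the daemon the least likely OOM-killer victim.
 */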
1579
1580 static int
1581 child (void * param)
1582 {
1583         pthread_t check_thr, uevent_thr, uxlsnr_thr, uevq_thr;
1584         pthread_attr_t log_attr, misc_attr, uevent_attr;
1585         struct vectors * vecs;
1586         struct multipath * mpp;
1587         int i;
1588         int rc, pid_rc;
1589
1590         mlockall(MCL_CURRENT | MCL_FUTURE);
1591         sem_init(&exit_sem, 0, 0);
1592         signal_init();
1593
1594         udev = udev_new();
1595
1596         setup_thread_attr(&misc_attr, 64 * 1024, 1);
1597         setup_thread_attr(&uevent_attr, 128 * 1024, 1);
1598         setup_thread_attr(&waiter_attr, 32 * 1024, 1);
1599
1600         if (logsink) {
1601                 setup_thread_attr(&log_attr, 64 * 1024, 0);
1602                 log_thread_start(&log_attr);
1603                 pthread_attr_destroy(&log_attr);
1604         }
1605
1606         running_state = DAEMON_START;
1607
1608         condlog(2, "--------start up--------");
1609         condlog(2, "read " DEFAULT_CONFIGFILE);
1610
1611         if (load_config(DEFAULT_CONFIGFILE, udev))
1612                 exit(1);
1613
1614         if (init_checkers()) {
1615                 condlog(0, "failed to initialize checkers");
1616                 exit(1);
1617         }
1618         if (init_prio()) {
1619                 condlog(0, "failed to initialize prioritizers");
1620                 exit(1);
1621         }
1622
1623         setlogmask(LOG_UPTO(conf->verbosity + 3));
1624
1625         if (conf->max_fds) {
1626                 struct rlimit fd_limit;
1627
1628                 if (getrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1629                         condlog(0, "can't get open fds limit: %s",
1630                                 strerror(errno));
1631                         fd_limit.rlim_cur = 0;
1632                         fd_limit.rlim_max = 0;
1633                 }
1634                 if (fd_limit.rlim_cur < conf->max_fds) {
1635                         fd_limit.rlim_cur = conf->max_fds;
1636                         if (fd_limit.rlim_max < conf->max_fds)
1637                                 fd_limit.rlim_max = conf->max_fds;
1638                         if (setrlimit(RLIMIT_NOFILE, &fd_limit) < 0) {
1639                                 condlog(0, "can't set open fds limit to "
1640                                         "%lu/%lu : %s",
1641                                         fd_limit.rlim_cur, fd_limit.rlim_max,
1642                                         strerror(errno));
1643                         } else {
1644                                 condlog(3, "set open fds limit to %lu/%lu",
1645                                         fd_limit.rlim_cur, fd_limit.rlim_max);
1646                         }
1647                 }
1648
1649         }
1650
1651         vecs = gvecs = init_vecs();
1652         if (!vecs)
1653                 exit(1);
1654
1655         setscheduler();
1656         set_oom_adj();
1657
1658         conf->daemon = 1;
1659         udev_set_sync_support(0);
1660         /*
1661          * Start uevent listener early to catch events
1662          */
1663         if ((rc = pthread_create(&uevent_thr, &uevent_attr, ueventloop, udev))) {
1664                 condlog(0, "failed to create uevent thread: %d", rc);
1665                 exit(1);
1666         }
1667         pthread_attr_destroy(&uevent_attr);
1668         if ((rc = pthread_create(&uxlsnr_thr, &misc_attr, uxlsnrloop, vecs))) {
1669                 condlog(0, "failed to create cli listener: %d", rc);
1670                 exit(1);
1671         }
1672         /*
1673          * fetch and configure both paths and multipaths
1674          */
1675         running_state = DAEMON_CONFIGURE;
1676
1677         lock(vecs->lock);
1678         if (configure(vecs, 1)) {
1679                 unlock(vecs->lock);
1680                 condlog(0, "failure during configuration");
1681                 exit(1);
1682         }
1683         unlock(vecs->lock);
1684
1685         /*
1686          * start threads
1687          */
1688         if ((rc = pthread_create(&check_thr, &misc_attr, checkerloop, vecs))) {
1689                 condlog(0,"failed to create checker loop thread: %d", rc);
1690                 exit(1);
1691         }
1692         if ((rc = pthread_create(&uevq_thr, &misc_attr, uevqloop, vecs))) {
1693                 condlog(0, "failed to create uevent dispatcher: %d", rc);
1694                 exit(1);
1695         }
1696         pthread_attr_destroy(&misc_attr);
1697
1698         /* Startup complete, create pidfile */
1699         pid_rc = pidfile_create(DEFAULT_PIDFILE, daemon_pid);
1700         /* Ignore errors, we can live without */
1701
1702         running_state = DAEMON_RUNNING;
1703
1704         /*
1705          * exit path
1706          */
1707         while(sem_wait(&exit_sem) != 0); /* Do nothing */
1708         running_state = DAEMON_SHUTDOWN;
1709         lock(vecs->lock);
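	/*
	 * If queue_without_daemon is off, clear queue_if_no_path on every
	 * map so outstanding I/O does not queue forever once the daemon
	 * has gone away.
	 */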
1710         if (conf->queue_without_daemon == QUE_NO_DAEMON_OFF)
1711                 vector_foreach_slot(vecs->mpvec, mpp, i)
1712                         dm_queue_if_no_path(mpp->alias, 0);
1713         remove_maps_and_stop_waiters(vecs);
1714         unlock(vecs->lock);
1715
1716         pthread_cancel(check_thr);
1717         pthread_cancel(uevent_thr);
1718         pthread_cancel(uxlsnr_thr);
1719         pthread_cancel(uevq_thr);
1720
1721         lock(vecs->lock);
1722         free_pathvec(vecs->pathvec, FREE_PATHS);
1723         vecs->pathvec = NULL;
1724         unlock(vecs->lock);
1725         /* Now all the waitevent threads will start rushing in. */
1726         while (vecs->lock.depth > 0) {
1727                 sleep (1); /* This is weak. */
1728                 condlog(3, "Have %d waitevent checker threads to de-alloc,"
1729                         " waiting...", vecs->lock.depth);
1730         }
1731         pthread_mutex_destroy(vecs->lock.mutex);
1732         FREE(vecs->lock.mutex);
1733         vecs->lock.depth = 0;
1734         vecs->lock.mutex = NULL;
1735         FREE(vecs);
1736         vecs = NULL;
1737
1738         cleanup_checkers();
1739         cleanup_prio();
1740
1741         dm_lib_release();
1742         dm_lib_exit();
1743
1744         /* We're done here */
1745         if (!pid_rc) {
1746                 condlog(3, "unlink pidfile");
1747                 unlink(DEFAULT_PIDFILE);
1748         }
1749
1750         condlog(2, "--------shut down-------");
1751
1752         if (logsink)
1753                 log_thread_stop();
1754
1755         /*
1756          * Freeing config must be done after condlog() and dm_lib_exit(),
1757          * because logging functions like dlog() and dm_write_log()
1758          * reference the config.
1759          */
1760         free_config(conf);
1761         conf = NULL;
1762         udev_unref(udev);
1763         udev = NULL;
1764 #ifdef _DEBUG_
1765         dbg_free_final(NULL);
1766 #endif
1767
1768         exit(0);
1769 }
1770
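/*
 * Classic double fork: the original process gets the first child's pid
 * back, the intermediate child exits after setsid(), and the surviving
 * process chdirs to /, redirects stdin/stdout/stderr to /dev/null,
 * records its pid in daemon_pid and returns 0.
 */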
1771 static int
1772 daemonize(void)
1773 {
1774         int pid;
1775         int dev_null_fd;
1776
1777         if( (pid = fork()) < 0){
1778                 fprintf(stderr, "Failed first fork : %s\n", strerror(errno));
1779                 return -1;
1780         }
1781         else if (pid != 0)
1782                 return pid;
1783
1784         setsid();
1785
1786         if ( (pid = fork()) < 0)
1787                 fprintf(stderr, "Failed second fork : %s\n", strerror(errno));
1788         else if (pid != 0)
1789                 _exit(0);
1790
1791         if (chdir("/") < 0)
1792                 fprintf(stderr, "cannot chdir to '/', continuing\n");
1793
1794         dev_null_fd = open("/dev/null", O_RDWR);
1795         if (dev_null_fd < 0){
1796                 fprintf(stderr, "cannot open /dev/null for input & output : %s\n",
1797                         strerror(errno));
1798                 _exit(0);
1799         }
1800
1801         close(STDIN_FILENO);
1802         if (dup(dev_null_fd) < 0) {
1803                 fprintf(stderr, "cannot dup /dev/null to stdin : %s\n",
1804                         strerror(errno));
1805                 _exit(0);
1806         }
1807         close(STDOUT_FILENO);
1808         if (dup(dev_null_fd) < 0) {
1809                 fprintf(stderr, "cannot dup /dev/null to stdout : %s\n",
1810                         strerror(errno));
1811                 _exit(0);
1812         }
1813         close(STDERR_FILENO);
1814         if (dup(dev_null_fd) < 0) {
1815                 fprintf(stderr, "cannot dup /dev/null to stderr : %s\n",
1816                         strerror(errno));
1817                 _exit(0);
1818         }
1819         close(dev_null_fd);
1820         daemon_pid = getpid();
1821         return 0;
1822 }
1823
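/*
 * Option handling: -d keeps the daemon in the foreground, -v <level>
 * sets the verbosity, -k talks to a running daemon through the unix
 * socket client (uxclnt), and any remaining arguments are joined into
 * one command line and sent the same way.
 */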
1824 int
1825 main (int argc, char *argv[])
1826 {
1827         extern char *optarg;
1828         extern int optind;
1829         int arg;
1830         int err;
1831
1832         logsink = 1;
1833         running_state = DAEMON_INIT;
1834         dm_init();
1835
1836         if (getuid() != 0) {
1837                 fprintf(stderr, "need to be root\n");
1838                 exit(1);
1839         }
1840
1841         /* make sure we don't lock any path */
1842         if (chdir("/") < 0)
1843                 fprintf(stderr, "can't chdir to root directory : %s\n",
1844                         strerror(errno));
1845         umask(umask(077) | 022);
1846
1847         conf = alloc_config();
1848
1849         if (!conf)
1850                 exit(1);
1851
1852         while ((arg = getopt(argc, argv, ":dv:k::")) != EOF ) {
1853         switch(arg) {
1854                 case 'd':
1855                         logsink = 0;
1856                         //debug=1; /* ### comment me out ### */
1857                         break;
1858                 case 'v':
1859                         if (!optarg ||
1860                             !isdigit(optarg[0]))
1861                                 exit(1);
1862
1863                         conf->verbosity = atoi(optarg);
1864                         break;
1865                 case 'k':
1866                         uxclnt(optarg);
1867                         exit(0);
1868                 default:
1869                         ;
1870                 }
1871         }
1872         if (optind < argc) {
1873                 char cmd[CMDSIZE];
1874                 char * s = cmd;
1875                 char * c = s;
1876
1877                 while (optind < argc) {
1878                         if (strchr(argv[optind], ' '))
1879                                 c += snprintf(c, s + CMDSIZE - c, "\"%s\" ", argv[optind]);
1880                         else
1881                                 c += snprintf(c, s + CMDSIZE - c, "%s ", argv[optind]);
1882                         optind++;
1883                 }
1884                 c += snprintf(c, s + CMDSIZE - c, "\n");
1885                 uxclnt(s);
1886                 exit(0);
1887         }
1888
1889         if (!logsink)
1890                 err = 0;
1891         else
1892                 err = daemonize();
1893
1894         if (err < 0)
1895                 /* error */
1896                 exit(1);
1897         else if (err > 0)
1898                 /* parent dies */
1899                 exit(0);
1900         else
1901                 /* child lives */
1902                 return (child(NULL));
1903 }
1904
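/*
 * Thread entry point for persistent reservation handling on a path that
 * has become usable: read the registered keys from the device (PR IN,
 * READ KEYS), check whether the map's reservation_key is among them,
 * and if so re-register it on this path with REGISTER AND IGNORE
 * EXISTING KEY, setting mpp->prflag.
 */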
1905 void *  mpath_pr_event_handler_fn (void * pathp )
1906 {
1907         struct multipath * mpp;
1908         int i,j, ret, isFound;
1909         struct path * pp = (struct path *)pathp;
1910         unsigned char *keyp;
1911         uint64_t prkey;
1912         struct prout_param_descriptor *param;
1913         struct prin_resp *resp;
1914
1915         mpp = pp->mpp;
1916
1917         resp = mpath_alloc_prin_response(MPATH_PRIN_RKEY_SA);
1918         if (!resp){
1919                 condlog(0, "%s: alloc failed for prin response", pp->dev);
1920                 return NULL;
1921         }
1922
1923         ret = prin_do_scsi_ioctl(pp->dev, MPATH_PRIN_RKEY_SA, resp, 0);
1924         if (ret != MPATH_PR_SUCCESS )
1925         {
1926                 condlog(0,"%s : pr in read keys service action failed. Error=%d", pp->dev, ret);
1927                 goto out;
1928         }
1929
1930         condlog(3, " event pr=%d addlen=%d",resp->prin_descriptor.prin_readkeys.prgeneration,
1931                         resp->prin_descriptor.prin_readkeys.additional_length );
1932
1933         if (resp->prin_descriptor.prin_readkeys.additional_length == 0 )
1934         {
1935                 condlog(1, "%s: No key found. Device may not be registered.", pp->dev);
1936                 ret = MPATH_PR_SUCCESS;
1937                 goto out;
1938         }
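	/*
	 * Assemble the map's 8-byte reservation key into prkey,
	 * most significant byte first.
	 */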
1939         prkey = 0;
1940         keyp = (unsigned char *)mpp->reservation_key;
1941         for (j = 0; j < 8; ++j) {
1942                 if (j > 0)
1943                         prkey <<= 8;
1944                 prkey |= *keyp;
1945                 ++keyp;
1946         }
1947         condlog(2, "Multipath reservation_key: 0x%" PRIx64, prkey);
1948
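	/* Scan the returned key list (8 bytes per entry) for this key. */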
1949         isFound = 0;
1950         for (i = 0; i < resp->prin_descriptor.prin_readkeys.additional_length/8; i++ )
1951         {
1952                 condlog(2, "PR IN READKEYS[%d] reservation key:", i);
1953                 dumpHex((char *)&resp->prin_descriptor.prin_readkeys.key_list[i*8], 8 , -1);
1954                 if (!memcmp(mpp->reservation_key, &resp->prin_descriptor.prin_readkeys.key_list[i*8], 8))
1955                 {
1956                         condlog(2, "%s: pr key found in prin readkeys response", mpp->alias);
1957                         isFound = 1;
1958                         break;
1959                 }
1960         }
1961         if (!isFound)
1962         {
1963                 condlog(0, "%s: Either device not registered or ", pp->dev);
1964                 condlog(0, "host is not authorised for registration. Skip path");
1965                 ret = MPATH_PR_OTHER;
1966                 goto out;
1967         }
1968
1969         param = malloc(sizeof(struct prout_param_descriptor));
        if (!param) {
                condlog(0, "%s: alloc failed for prout parameter descriptor", pp->dev);
                ret = MPATH_PR_OTHER;
                goto out;
        }
1970         memset(param, 0, sizeof(struct prout_param_descriptor));
1971
1972         for (j = 7; j >= 0; --j) {
1973                 param->sa_key[j] = (prkey & 0xff);
1974                 prkey >>= 8;
1975         }
1976         param->num_transportid = 0;
1977
1978         condlog(3, "device %s:%s", pp->dev, pp->mpp->wwid);
1979
1980         ret = prout_do_scsi_ioctl(pp->dev, MPATH_PROUT_REG_IGN_SA, 0, 0, param, 0);
1981         if (ret != MPATH_PR_SUCCESS )
1982         {
1983                 condlog(0,"%s: Reservation registration failed. Error: %d", pp->dev, ret);
1984         }
1985         mpp->prflag = 1;
1986
1987         free(param);
1988 out:
1989         free(resp);
1990         return NULL;
1991 }
1992
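/*
 * Kick off the persistent reservation handler for a path in its own
 * thread and wait for it to finish. Returns -1 if the map has no
 * reservation_key configured or the thread cannot be created.
 */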
1993 int mpath_pr_event_handle(struct path *pp)
1994 {
1995         pthread_t thread;
1996         int rc;
1997         pthread_attr_t attr;
1998         struct multipath * mpp;
1999
2000         mpp = pp->mpp;
2001
2002         if (!mpp->reservation_key)
2003                 return -1;
2004
2005         pthread_attr_init(&attr);
2006         pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
2007
2008         rc = pthread_create(&thread, &attr, mpath_pr_event_handler_fn, pp);
2009         pthread_attr_destroy(&attr);
2010         if (rc) {
2011                 condlog(0, "%s: ERROR; return code from pthread_create() is %d", pp->dev, rc);
2012                 return -1;
2013         }
2014         rc = pthread_join(thread, NULL);
2015         return 0;
2016 }
2017