public inbox for lvm2-cvs@sourceware.org
help / color / mirror / Atom feed
* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2011-01-12 20:42 agk
  0 siblings, 0 replies; 6+ messages in thread
From: agk @ 2011-01-12 20:42 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	agk@sourceware.org	2011-01-12 20:42:51

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-command.c lvm-functions.c 
	lib/activate   : fs.c 
	lib/locking    : cluster_locking.c file_locking.c locking.c 
	                 locking.h no_locking.c 
	lib/metadata   : lv_manip.c metadata-exported.h metadata.c 
	lib/misc       : lvm-exec.c 
	libdm          : libdm-common.c 
	tools          : polldaemon.c 

Log message:
	Replace fs_unlock by sync_local_dev_names to notify local clvmd. (2.02.80)
	Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.1877&r2=1.1878
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.46&r2=1.47
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.c.diff?cvsroot=lvm2&r1=1.105&r2=1.106
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/activate/fs.c.diff?cvsroot=lvm2&r1=1.55&r2=1.56
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/cluster_locking.c.diff?cvsroot=lvm2&r1=1.48&r2=1.49
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/file_locking.c.diff?cvsroot=lvm2&r1=1.53&r2=1.54
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/locking.c.diff?cvsroot=lvm2&r1=1.87&r2=1.88
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/locking.h.diff?cvsroot=lvm2&r1=1.59&r2=1.60
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/no_locking.c.diff?cvsroot=lvm2&r1=1.24&r2=1.25
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/lv_manip.c.diff?cvsroot=lvm2&r1=1.243&r2=1.244
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata-exported.h.diff?cvsroot=lvm2&r1=1.173&r2=1.174
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata.c.diff?cvsroot=lvm2&r1=1.419&r2=1.420
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/misc/lvm-exec.c.diff?cvsroot=lvm2&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/libdm/libdm-common.c.diff?cvsroot=lvm2&r1=1.106&r2=1.107
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/tools/polldaemon.c.diff?cvsroot=lvm2&r1=1.41&r2=1.42

--- LVM2/WHATS_NEW	2011/01/12 15:28:33	1.1877
+++ LVM2/WHATS_NEW	2011/01/12 20:42:50	1.1878
@@ -1,5 +1,7 @@
 Version 2.02.81 -
 ===================================
+  Replace fs_unlock by sync_local_dev_names to notify local clvmd. (2.02.80)
+  Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.
   Accept fusion fio in device type filter.
   Add disk to mirrored log type conversion.
 
--- LVM2/daemons/clvmd/clvm.h	2010/07/28 13:55:43	1.9
+++ LVM2/daemons/clvmd/clvm.h	2011/01/12 20:42:50	1.10
@@ -71,4 +71,5 @@
 #define CLVMD_CMD_SET_DEBUG	    42
 #define CLVMD_CMD_VG_BACKUP	    43
 #define CLVMD_CMD_RESTART	    44
+#define CLVMD_CMD_SYNC_NAMES	    45
 #endif
--- LVM2/daemons/clvmd/clvmd-command.c	2011/01/10 14:02:30	1.46
+++ LVM2/daemons/clvmd/clvmd-command.c	2011/01/12 20:42:50	1.47
@@ -139,6 +139,10 @@
 		do_refresh_cache();
 		break;
 
+	case CLVMD_CMD_SYNC_NAMES:
+		lvm_do_fs_unlock();
+		break;
+
 	case CLVMD_CMD_SET_DEBUG:
 		debug = args[0];
 		break;
@@ -275,6 +279,7 @@
 	case CLVMD_CMD_GET_CLUSTERNAME:
 	case CLVMD_CMD_SET_DEBUG:
 	case CLVMD_CMD_VG_BACKUP:
+	case CLVMD_CMD_SYNC_NAMES:
 	case CLVMD_CMD_LOCK_QUERY:
 	case CLVMD_CMD_RESTART:
 		break;
@@ -307,6 +312,7 @@
 
 	case CLVMD_CMD_LOCK_VG:
 	case CLVMD_CMD_VG_BACKUP:
+	case CLVMD_CMD_SYNC_NAMES:
 	case CLVMD_CMD_LOCK_QUERY:
 		/* Nothing to do here */
 		break;
--- LVM2/daemons/clvmd/lvm-functions.c	2011/01/10 14:02:30	1.105
+++ LVM2/daemons/clvmd/lvm-functions.c	2011/01/12 20:42:50	1.106
@@ -897,6 +897,7 @@
 void lvm_do_fs_unlock(void)
 {
 	pthread_mutex_lock(&lvm_lock);
+	DEBUGLOG("Syncing device names\n");
 	fs_unlock();
 	pthread_mutex_unlock(&lvm_lock);
 }
--- LVM2/lib/activate/fs.c	2011/01/10 14:02:31	1.55
+++ LVM2/lib/activate/fs.c	2011/01/12 20:42:50	1.56
@@ -403,6 +403,7 @@
 void fs_unlock(void)
 {
 	if (!memlock()) {
+		log_debug("Syncing device names");
 		/* Wait for all processed udev devices */
 		if (!dm_udev_wait(_fs_cookie))
 			stack;
--- LVM2/lib/locking/cluster_locking.c	2011/01/05 15:10:30	1.48
+++ LVM2/lib/locking/cluster_locking.c	2011/01/12 20:42:50	1.49
@@ -345,14 +345,15 @@
 	 * locks are cluster-wide.
 	 * Also, if the lock is exclusive it makes no sense to try to
 	 * acquire it on all nodes, so just do that on the local node too.
-	 * One exception, is that P_ locks /do/ get distributed across
-	 * the cluster because they might have side-effects.
+	 * One exception, is that P_ locks (except VG_SYNC_NAMES) /do/ get 
+	 * distributed across the cluster because they might have side-effects.
 	 */
-	if (strncmp(name, "P_", 2) &&
-	    (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
-	     (flags & LCK_TYPE_MASK) == LCK_EXCL ||
-	     (flags & LCK_LOCAL) ||
-	     !(flags & LCK_CLUSTER_VG)))
+	if ((strncmp(name, "P_", 2) &&
+	     (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
+	      (flags & LCK_TYPE_MASK) == LCK_EXCL ||
+	      (flags & LCK_LOCAL) ||
+	      !(flags & LCK_CLUSTER_VG))) ||
+	    (clvmd_cmd == CLVMD_CMD_SYNC_NAMES && (flags & LCK_LOCAL)))
 		node = ".";
 
 	status = _cluster_request(clvmd_cmd, node, args, len,
@@ -401,6 +402,11 @@
 
 	switch (flags & LCK_SCOPE_MASK) {
 	case LCK_VG:
+		if (!strcmp(resource, VG_SYNC_NAMES)) {
+			log_very_verbose("Requesting sync names.");
+			return _lock_for_cluster(cmd, CLVMD_CMD_SYNC_NAMES,
+						 flags & ~LCK_HOLD, resource);
+		}
 		if (flags == LCK_VG_BACKUP) {
 			log_very_verbose("Requesting backup of VG metadata for %s",
 					 resource);
--- LVM2/lib/locking/file_locking.c	2011/01/10 14:02:31	1.53
+++ LVM2/lib/locking/file_locking.c	2011/01/12 20:42:50	1.54
@@ -265,6 +265,9 @@
 		if (strcmp(resource, VG_GLOBAL))
 			lvmcache_drop_metadata(resource, 0);
 
+		if (!strcmp(resource, VG_SYNC_NAMES))
+			fs_unlock();
+
 		/* LCK_CACHE does not require a real lock */
 		if (flags & LCK_CACHE)
 			break;
--- LVM2/lib/locking/locking.c	2010/10/25 11:20:55	1.87
+++ LVM2/lib/locking/locking.c	2011/01/12 20:42:50	1.88
@@ -325,7 +325,7 @@
 	char path[PATH_MAX];
 
 	/* We'll allow operations on orphans */
-	if (is_orphan_vg(vgname) || is_global_vg(vgname))
+	if (!is_real_vg(vgname))
 		return 1;
 
 	/* LVM1 is only present in 2.4 kernels. */
--- LVM2/lib/locking/locking.h	2010/12/08 20:50:50	1.59
+++ LVM2/lib/locking/locking.h	2011/01/12 20:42:51	1.60
@@ -109,6 +109,7 @@
  */
 #define VG_ORPHANS	"#orphans"
 #define VG_GLOBAL	"#global"
+#define VG_SYNC_NAMES	"#sync_names"
 
 /*
  * Common combinations
@@ -169,6 +170,8 @@
 	lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT)
 #define remote_backup_metadata(vg)	\
 	lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP)
+#define sync_local_dev_names(cmd)	\
+	lock_vol(cmd, VG_SYNC_NAMES, LCK_NONE | LCK_CACHE | LCK_LOCAL)
 
 /* Process list of LVs */
 int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs);
--- LVM2/lib/locking/no_locking.c	2010/08/17 19:25:05	1.24
+++ LVM2/lib/locking/no_locking.c	2011/01/12 20:42:51	1.25
@@ -38,6 +38,8 @@
 {
 	switch (flags & LCK_SCOPE_MASK) {
 	case LCK_VG:
+		if (!strcmp(resource, VG_SYNC_NAMES))
+			fs_unlock();
 		break;
 	case LCK_LV:
 		switch (flags & LCK_TYPE_MASK) {
--- LVM2/lib/metadata/lv_manip.c	2011/01/11 17:05:09	1.243
+++ LVM2/lib/metadata/lv_manip.c	2011/01/12 20:42:51	1.244
@@ -3021,7 +3021,7 @@
 		return 0;
 	}
 
-	fs_unlock();  /* Wait until devices are available */
+	sync_local_dev_names(cmd);  /* Wait until devices are available */
 
 	log_verbose("Clearing start of logical volume \"%s\"", lv->name);
 
--- LVM2/lib/metadata/metadata-exported.h	2010/12/08 20:50:50	1.173
+++ LVM2/lib/metadata/metadata-exported.h	2011/01/12 20:42:51	1.174
@@ -362,6 +362,7 @@
 			const char *lv_name);
 int is_global_vg(const char *vg_name);
 int is_orphan_vg(const char *vg_name);
+int is_real_vg(const char *vg_name);
 int vg_missing_pv_count(const struct volume_group *vg);
 int vgs_are_compatible(struct cmd_context *cmd,
 		       struct volume_group *vg_from,
--- LVM2/lib/metadata/metadata.c	2010/12/22 15:36:41	1.419
+++ LVM2/lib/metadata/metadata.c	2011/01/12 20:42:51	1.420
@@ -3561,6 +3561,14 @@
 }
 
 /*
+ * Exclude pseudo VG names used for locking.
+ */
+int is_real_vg(const char *vg_name)
+{
+	return (vg_name && *vg_name != '#');
+}
+
+/*
  * Returns:
  *  0 - fail
  *  1 - success
--- LVM2/lib/misc/lvm-exec.c	2011/01/10 19:49:42	1.10
+++ LVM2/lib/misc/lvm-exec.c	2011/01/12 20:42:51	1.11
@@ -55,7 +55,7 @@
 
 	log_verbose("Executing: %s", _verbose_args(argv, buf, sizeof(buf)));
 
-	fs_unlock(); /* Flush oops and ensure cookie is not shared */
+	sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */
 
 	if ((pid = fork()) == -1) {
 		log_error("fork failed: %s", strerror(errno));
--- LVM2/libdm/libdm-common.c	2010/12/13 12:44:09	1.106
+++ LVM2/libdm/libdm-common.c	2011/01/12 20:42:51	1.107
@@ -507,7 +507,7 @@
 	(void) dm_prepare_selinux_context(path, S_IFBLK);
 	old_mask = umask(0);
 	if (mknod(path, S_IFBLK | mode, dev) < 0) {
-		log_error("Unable to make device node for '%s'", dev_name);
+		log_error("%s: mknod for %s failed: %s", path, dev_name, strerror(errno));
 		umask(old_mask);
 		(void) dm_prepare_selinux_context(NULL, 0);
 		return 0;
--- LVM2/tools/polldaemon.c	2011/01/10 19:31:02	1.41
+++ LVM2/tools/polldaemon.c	2011/01/12 20:42:51	1.42
@@ -42,7 +42,7 @@
 
 	sigaction(SIGCHLD, &act, NULL);
 
-	fs_unlock(); /* Flush oops and ensure cookie is not shared */
+	sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */
 
 	if ((pid = fork()) == -1) {
 		log_error("fork failed: %s", strerror(errno));


^ permalink raw reply	[flat|nested] 6+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2010-04-20 14:07 ccaulfield
  0 siblings, 0 replies; 6+ messages in thread
From: ccaulfield @ 2010-04-20 14:07 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	ccaulfield@sourceware.org	2010-04-20 14:07:39

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-command.c clvmd.c lvm-functions.c 
	                 lvm-functions.h refresh_clvmd.c refresh_clvmd.h 
	man            : clvmd.8.in 
	scripts        : clvmd_init_red_hat.in 

Log message:
	Add -S command to clvmd, so it can restart itself and still
	preserve exclusive LV locks.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.1530&r2=1.1531
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.31&r2=1.32
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd.c.diff?cvsroot=lvm2&r1=1.69&r2=1.70
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.c.diff?cvsroot=lvm2&r1=1.88&r2=1.89
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.h.diff?cvsroot=lvm2&r1=1.12&r2=1.13
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/refresh_clvmd.c.diff?cvsroot=lvm2&r1=1.7&r2=1.8
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/refresh_clvmd.h.diff?cvsroot=lvm2&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/man/clvmd.8.in.diff?cvsroot=lvm2&r1=1.2&r2=1.3
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/scripts/clvmd_init_red_hat.in.diff?cvsroot=lvm2&r1=1.5&r2=1.6

--- LVM2/WHATS_NEW	2010/04/19 15:24:00	1.1530
+++ LVM2/WHATS_NEW	2010/04/20 14:07:37	1.1531
@@ -1,5 +1,6 @@
 Version 2.02.64 -
 =================================
+  Add -S command to clvmd to restart the daemon preserving exclusive locks.
   Increment lvm2app version from 1 to 2.
   Change lvm2app memory alloc/free for pv/vg/lv properties.
   Change daemon lock filename from lvm2_monitor to lvm2-monitor for consistency.
--- LVM2/daemons/clvmd/clvm.h	2009/05/19 10:38:58	1.7
+++ LVM2/daemons/clvmd/clvm.h	2010/04/20 14:07:38	1.8
@@ -69,4 +69,5 @@
 #define CLVMD_CMD_GET_CLUSTERNAME   41
 #define CLVMD_CMD_SET_DEBUG	    42
 #define CLVMD_CMD_VG_BACKUP	    43
+#define CLVMD_CMD_RESTART	    44
 #endif
--- LVM2/daemons/clvmd/clvmd-command.c	2010/01/05 16:07:57	1.31
+++ LVM2/daemons/clvmd/clvmd-command.c	2010/04/20 14:07:38	1.32
@@ -80,6 +80,7 @@
 
 extern debug_t debug;
 extern struct cluster_ops *clops;
+static int restart_clvmd(void);
 
 /* This is where all the real work happens:
    NOTE: client will be NULL when this is executed on a remote node */
@@ -158,6 +159,10 @@
 		debug = args[0];
 		break;
 
+	case CLVMD_CMD_RESTART:
+		restart_clvmd();
+		break;
+
 	case CLVMD_CMD_GET_CLUSTERNAME:
 		status = clops->get_cluster_name(*buf, buflen);
 		if (!status)
@@ -285,6 +290,7 @@
 	case CLVMD_CMD_SET_DEBUG:
 	case CLVMD_CMD_VG_BACKUP:
 	case CLVMD_CMD_LOCK_QUERY:
+	case CLVMD_CMD_RESTART:
 		break;
 
 	default:
@@ -351,3 +357,50 @@
 	client->bits.localsock.private = 0;
     }
 }
+
+
+static int restart_clvmd(void)
+{
+	char *argv[1024];
+	int argc = 1;
+	struct dm_hash_node *hn = NULL;
+	char *lv_name;
+
+	DEBUGLOG("clvmd restart requested\n");
+
+	/*
+	 * Build the command-line
+	 */
+	argv[0] = strdup("clvmd");
+
+	/* Propogate debug options */
+	if (debug) {
+		char debug_level[16];
+
+		sprintf(debug_level, "-d%d", debug);
+		argv[argc++] = strdup(debug_level);
+	}
+
+	/* Now add the exclusively-open LVs */
+	do {
+		hn = get_next_excl_lock(hn, &lv_name);
+		if (lv_name) {
+			argv[argc++] = strdup("-E");
+			argv[argc++] = strdup(lv_name);
+
+			DEBUGLOG("excl lock: %s\n", lv_name);
+			hn = get_next_excl_lock(hn, &lv_name);
+		}
+	} while (hn && *lv_name);
+	argv[argc++] = NULL;
+
+	/* Tidy up */
+	destroy_lvm();
+
+	/* Exec new clvmd */
+	/* NOTE: This will fail when downgrading! */
+	execve("clvmd", argv, NULL);
+
+	/* We failed */
+	return 0;
+}
--- LVM2/daemons/clvmd/clvmd.c	2010/04/14 18:54:37	1.69
+++ LVM2/daemons/clvmd/clvmd.c	2010/04/20 14:07:38	1.70
@@ -92,6 +92,11 @@
 	unsigned short xid;
 };
 
+struct lvm_startup_params {
+	int using_gulm;
+	char **argv;
+};
+
 debug_t debug;
 static pthread_t lvm_thread;
 static pthread_mutex_t lvm_thread_mutex;
@@ -163,6 +168,7 @@
 	fprintf(file, "   -d       Set debug level\n");
 	fprintf(file, "            If starting clvmd then don't fork, run in the foreground\n");
 	fprintf(file, "   -R       Tell all running clvmds in the cluster to reload their device cache\n");
+	fprintf(file, "   -S       Restart clvmd, preserving exclusive locks\n");
 	fprintf(file, "   -C       Sets debug level (from -d) on all clvmd instances clusterwide\n");
 	fprintf(file, "   -t<secs> Command timeout (default 60 seconds)\n");
 	fprintf(file, "   -T<secs> Startup timeout (default none)\n");
@@ -268,6 +274,9 @@
 	case CLVMD_CMD_LOCK_QUERY:
 		command = "LOCK_QUERY";
 		break;
+	case CLVMD_CMD_RESTART:
+		command = "RESTART";
+		break;
 	default:
 		command = "unknown";
 		break;
@@ -283,6 +292,7 @@
 	int local_sock;
 	struct local_client *newfd;
 	struct utsname nodeinfo;
+	struct lvm_startup_params lvm_params;
 	signed char opt;
 	int cmd_timeout = DEFAULT_CMD_TIMEOUT;
 	int start_timeout = 0;
@@ -295,7 +305,7 @@
 	/* Deal with command-line arguments */
 	opterr = 0;
 	optind = 0;
-	while ((opt = getopt(argc, argv, "?vVhd::t:RT:CI:")) != EOF) {
+	while ((opt = getopt(argc, argv, "?vVhd::t:RST:CI:E:")) != EOF) {
 		switch (opt) {
 		case 'h':
 			usage(argv[0], stdout);
@@ -306,7 +316,10 @@
 			exit(0);
 
 		case 'R':
-			return refresh_clvmd()==1?0:1;
+			return refresh_clvmd(1)==1?0:1;
+
+		case 'S':
+			return restart_clvmd(clusterwide_opt)==1?0:1;
 
 		case 'C':
 			clusterwide_opt = 1;
@@ -489,8 +502,10 @@
 
 	/* Don't let anyone else to do work until we are started */
 	pthread_mutex_lock(&lvm_start_mutex);
+	lvm_params.using_gulm = using_gulm;
+	lvm_params.argv = argv;
 	pthread_create(&lvm_thread, NULL, (lvm_pthread_fn_t*)lvm_thread_fn,
-			(void *)(long)using_gulm);
+			(void *)&lvm_params);
 
 	/* Tell the rest of the cluster our version number */
 	/* CMAN can do this immediately, gulm needs to wait until
@@ -551,6 +566,10 @@
 			close(client_fd);
 			return 1;
 		}
+
+		if (fcntl(client_fd, F_SETFD, 1))
+			DEBUGLOG("setting CLOEXEC on client fd failed: %s\n", strerror(errno));
+
 		newfd->fd = client_fd;
 		newfd->type = LOCAL_SOCK;
 		newfd->xid = 0;
@@ -1182,6 +1201,12 @@
 		}
 		DEBUGLOG("creating pipe, [%d, %d]\n", comms_pipe[0],
 			 comms_pipe[1]);
+
+		if (fcntl(comms_pipe[0], F_SETFD, 1))
+			DEBUGLOG("setting CLOEXEC on pipe[0] failed: %s\n", strerror(errno));
+		if (fcntl(comms_pipe[1], F_SETFD, 1))
+			DEBUGLOG("setting CLOEXEC on pipe[1] failed: %s\n", strerror(errno));
+
 		newfd->fd = comms_pipe[0];
 		newfd->removeme = 0;
 		newfd->type = THREAD_PIPE;
@@ -1830,7 +1855,7 @@
 {
 	struct dm_list *cmdl, *tmp;
 	sigset_t ss;
-	int using_gulm = (int)(long)arg;
+	struct lvm_startup_params *lvm_params = arg;
 
 	DEBUGLOG("LVM thread function started\n");
 
@@ -1841,7 +1866,7 @@
 	pthread_sigmask(SIG_BLOCK, &ss, NULL);
 
 	/* Initialise the interface to liblvm */
-	init_lvm(using_gulm);
+	init_lvm(lvm_params->using_gulm, lvm_params->argv);
 
 	/* Allow others to get moving */
 	pthread_mutex_unlock(&lvm_start_mutex);
@@ -1956,8 +1981,10 @@
 		log_error("Can't create local socket: %m");
 		return -1;
 	}
+
 	/* Set Close-on-exec & non-blocking */
-	fcntl(local_socket, F_SETFD, 1);
+	if (fcntl(local_socket, F_SETFD, 1))
+		DEBUGLOG("setting CLOEXEC on local_socket failed: %s\n", strerror(errno));
 	fcntl(local_socket, F_SETFL, fcntl(local_socket, F_GETFL, 0) | O_NONBLOCK);
 
 	memset(&sockaddr, 0, sizeof(sockaddr));
--- LVM2/daemons/clvmd/lvm-functions.c	2010/04/13 19:54:16	1.88
+++ LVM2/daemons/clvmd/lvm-functions.c	2010/04/20 14:07:38	1.89
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -103,7 +103,7 @@
 		command = "LCK_VG";
 		break;
 	case LCK_LV:
-		scope = "LV"; 
+		scope = "LV";
 		switch (cmdl & LCK_MASK) {
 		case LCK_LV_EXCLUSIVE & LCK_MASK:
 			command = "LCK_LV_EXCLUSIVE";
@@ -727,12 +727,36 @@
 }
 
 /*
+ * Compare the uuid with the list of exclusive locks that clvmd
+ * held before it was restarted, so we can get the right kind
+ * of lock now we are restarting.
+ */
+static int was_ex_lock(char *uuid, char **argv)
+{
+	int optnum = 0;
+	char *opt = argv[optnum];
+
+	while (opt) {
+		if (strcmp(opt, "-E") == 0) {
+			opt = argv[++optnum];
+			if (opt && (strcmp(opt, uuid) == 0)) {
+				DEBUGLOG("Lock %s is exclusive\n", uuid);
+				return 1;
+			}
+		}
+		opt = argv[++optnum];
+	}
+	return 0;
+}
+
+/*
  * Ideally, clvmd should be started before any LVs are active
  * but this may not be the case...
  * I suppose this also comes in handy if clvmd crashes, not that it would!
  */
-static void *get_initial_state()
+static void *get_initial_state(char **argv)
 {
+	int lock_mode;
 	char lv[64], vg[64], flags[25], vg_flags[25];
 	char uuid[65];
 	char line[255];
@@ -768,8 +792,15 @@
 				memcpy(&uuid[58], &lv[32], 6);
 				uuid[64] = '\0';
 
+				lock_mode = LKM_CRMODE;
+
+				/* Look for this lock in the list of EX locks
+				   we were passed on the command-line */
+				if (was_ex_lock(uuid, argv))
+					lock_mode = LKM_EXMODE;
+
 				DEBUGLOG("getting initial lock for %s\n", uuid);
-				hold_lock(uuid, LKM_CRMODE, LKF_NOQUEUE);
+				hold_lock(uuid, lock_mode, LKF_NOQUEUE);
 			}
 		}
 	}
@@ -848,8 +879,31 @@
 	pthread_mutex_unlock(&lvm_lock);
 }
 
+struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name)
+{
+	struct lv_info *lvi;
+
+	*name = NULL;
+	if (!v)
+		v = dm_hash_get_first(lv_hash);
+
+	do {
+		if (v) {
+			lvi = dm_hash_get_data(lv_hash, v);
+			DEBUGLOG("Looking for EX locks. found %x mode %d\n", lvi->lock_id, lvi->lock_mode);
+
+			if (lvi->lock_mode == LCK_EXCL) {
+				*name = dm_hash_get_key(lv_hash, v);
+			}
+			v = dm_hash_get_next(lv_hash, v);
+		}
+	} while (v && !*name);
+	DEBUGLOG("returning EXclusive UUID %s\n", *name);
+	return v;
+}
+
 /* Called to initialise the LVM context of the daemon */
-int init_lvm(int using_gulm)
+int init_lvm(int using_gulm, char **argv)
 {
 	if (!(cmd = create_toolcontext(1, NULL))) {
 		log_error("Failed to allocate command context");
@@ -874,7 +928,7 @@
 	if (using_gulm)
 		drop_vg_locks();
 
-	get_initial_state();
+	get_initial_state(argv);
 
 	/* Trap log messages so we can pass them back to the user */
 	init_log_fn(lvm2_log_fn);
--- LVM2/daemons/clvmd/lvm-functions.h	2010/01/05 16:05:12	1.12
+++ LVM2/daemons/clvmd/lvm-functions.h	2010/04/20 14:07:38	1.13
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -27,7 +27,7 @@
 			char *resource);
 extern int do_check_lvm1(const char *vgname);
 extern int do_refresh_cache(void);
-extern int init_lvm(int using_gulm);
+extern int init_lvm(int using_gulm, char **argv);
 extern void destroy_lvm(void);
 extern void init_lvhash(void);
 extern void destroy_lvhash(void);
@@ -37,5 +37,5 @@
 extern char *get_last_lvm_error(void);
 extern void do_lock_vg(unsigned char command, unsigned char lock_flags,
 		      char *resource);
-
+extern struct dm_hash_node *get_next_excl_lock(struct dm_hash_node *v, char **name);
 #endif
--- LVM2/daemons/clvmd/refresh_clvmd.c	2009/09/15 12:51:28	1.7
+++ LVM2/daemons/clvmd/refresh_clvmd.c	2010/04/20 14:07:38	1.8
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -14,7 +14,7 @@
  */
 
 /*
- * Tell all clvmds in a cluster to refresh their toolcontext
+ * Send a command to a running clvmd from the command-line
  */
 
 #define _GNU_SOURCE
@@ -83,7 +83,7 @@
 }
 
 /* Send a request and return the status */
-static int _send_request(const char *inbuf, int inlen, char **retbuf)
+static int _send_request(const char *inbuf, int inlen, char **retbuf, int no_response)
 {
 	char outbuf[PIPE_BUF];
 	struct clvm_header *outheader = (struct clvm_header *) outbuf;
@@ -100,6 +100,8 @@
 		fprintf(stderr, "Error writing data to clvmd: %s", strerror(errno));
 		return 0;
 	}
+	if (no_response)
+		return 1;
 
 	/* Get the response */
  reread:
@@ -184,7 +186,7 @@
  * Send a message to a(or all) node(s) in the cluster and wait for replies
  */
 static int _cluster_request(char cmd, const char *node, void *data, int len,
-			   lvm_response_t ** response, int *num)
+			    lvm_response_t ** response, int *num, int no_response)
 {
 	char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
 	char *inptr;
@@ -207,8 +209,8 @@
 	memcpy(head->node + strlen(head->node) + 1, data, len);
 
 	status = _send_request(outbuf, sizeof(struct clvm_header) +
-			      strlen(head->node) + len, &retbuf);
-	if (!status)
+			       strlen(head->node) + len, &retbuf, no_response);
+	if (!status || no_response)
 		goto out;
 
 	/* Count the number of responses we got */
@@ -287,7 +289,7 @@
 	return 1;
 }
 
-int refresh_clvmd()
+int refresh_clvmd(int all_nodes)
 {
 	int num_responses;
 	char args[1]; // No args really.
@@ -296,7 +298,7 @@
 	int status;
 	int i;
 
-	status = _cluster_request(CLVMD_CMD_REFRESH, "*", args, 0, &response, &num_responses);
+	status = _cluster_request(CLVMD_CMD_REFRESH, all_nodes?"*":".", args, 0, &response, &num_responses, 0);
 
 	/* If any nodes were down then display them and return an error */
 	for (i = 0; i < num_responses; i++) {
@@ -323,6 +325,12 @@
 	return status;
 }
 
+int restart_clvmd(int all_nodes)
+{
+	int dummy;
+	return _cluster_request(CLVMD_CMD_RESTART, all_nodes?"*":".", NULL, 0, NULL, &dummy, 1);
+}
+
 int debug_clvmd(int level, int clusterwide)
 {
 	int num_responses;
@@ -339,7 +347,7 @@
 	else
 		nodes = ".";
 
-	status = _cluster_request(CLVMD_CMD_SET_DEBUG, nodes, args, 1, &response, &num_responses);
+	status = _cluster_request(CLVMD_CMD_SET_DEBUG, nodes, args, 1, &response, &num_responses, 0);
 
 	/* If any nodes were down then display them and return an error */
 	for (i = 0; i < num_responses; i++) {
--- LVM2/daemons/clvmd/refresh_clvmd.h	2007/08/17 11:51:23	1.2
+++ LVM2/daemons/clvmd/refresh_clvmd.h	2010/04/20 14:07:38	1.3
@@ -13,6 +13,7 @@
  */
 
 
-int refresh_clvmd(void);
+int refresh_clvmd(int all_nodes);
+int restart_clvmd(int all_nodes);
 int debug_clvmd(int level, int clusterwide);
 
--- LVM2/man/clvmd.8.in	2009/07/30 13:32:39	1.2
+++ LVM2/man/clvmd.8.in	2010/04/20 14:07:38	1.3
@@ -5,6 +5,7 @@
 .B clvmd
 [\-d [<value>]] [\-C] [\-h]
 [\-R]
+[\-S]
 [\-t <timeout>]
 [\-T <start timeout>]
 [\-V]
@@ -74,6 +75,12 @@
 re-read the lvm configuration file. This command should be run whenever the
 devices on a cluster system are changed.
 .TP
+.I \-S
+Tells the running clvmd to exit and restart. This is a preferred option
+to killing and restarting clvmd as it will preserve exclusive LV locks.
+If a full stop & restart is done instead, exclusive LV locks will be
+re-acquired as shared.
+.TP
 .I \-I
 Selects the cluster manager to use for locking and internal communications,
 the available managers will be listed as part of the 'clvmd -h' output.
--- LVM2/scripts/clvmd_init_red_hat.in	2010/02/26 13:07:43	1.5
+++ LVM2/scripts/clvmd_init_red_hat.in	2010/04/20 14:07:38	1.6
@@ -146,7 +146,17 @@
 	# another start. Even if start is protected by rh_status_q,
 	# that would avoid spawning another daemon, it would try to
 	# reactivate the VGs.
-	stop && start
+
+	# Try to get clvmd to restart itself. This will preserve 
+	# exclusive LV locks
+	action "Restarting $DAEMON: " $DAEMON -S || return $?
+
+	# If that fails then do a normal stop & restart
+	if  [ $? != 0 ]; then
+	    stop && start
+	else
+	    touch $LOCK_FILE
+	fi
 }
 
 # See how we were called.


^ permalink raw reply	[flat|nested] 6+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2009-05-19 10:39 mbroz
  0 siblings, 0 replies; 6+ messages in thread
From: mbroz @ 2009-05-19 10:39 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	mbroz@sourceware.org	2009-05-19 10:39:01

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-command.c clvmd.c lvm-functions.c 
	                 lvm-functions.h 
	lib/activate   : activate.c 
	lib/locking    : cluster_locking.c locking.c locking.h 
	                 locking_types.h 

Log message:
	Add infrastructure for querying for remote locks.
	
	Currently, when the code needs to ensure that a volume is
	not active on a remote node, it has to try to activate the
	volume exclusively.
	
	The patch adds a simple clvmd command which queries all nodes
	for the lock on a given resource.
	
	The lock type is returned in reply in text.
	
	(But code currently uses CR and EX modes only.)

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.1115&r2=1.1116
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.6&r2=1.7
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.27&r2=1.28
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd.c.diff?cvsroot=lvm2&r1=1.57&r2=1.58
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.c.diff?cvsroot=lvm2&r1=1.60&r2=1.61
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.h.diff?cvsroot=lvm2&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/activate/activate.c.diff?cvsroot=lvm2&r1=1.147&r2=1.148
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/cluster_locking.c.diff?cvsroot=lvm2&r1=1.33&r2=1.34
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/locking.c.diff?cvsroot=lvm2&r1=1.57&r2=1.58
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/locking.h.diff?cvsroot=lvm2&r1=1.45&r2=1.46
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/locking_types.h.diff?cvsroot=lvm2&r1=1.15&r2=1.16

--- LVM2/WHATS_NEW	2009/05/19 10:25:16	1.1115
+++ LVM2/WHATS_NEW	2009/05/19 10:38:58	1.1116
@@ -1,5 +1,6 @@
 Version 2.02.46 - 
 ================================
+  Introduce CLVMD_CMD_LOCK_QUERY command for clvmd.
   Use lvconvert --repair in dmeventd mirror DSO.
   Fix pvmove to revert operation if temporary mirror creation fails.
   Fix metadata export for VG with missing PVs.
--- LVM2/daemons/clvmd/clvm.h	2007/12/04 15:39:26	1.6
+++ LVM2/daemons/clvmd/clvm.h	2009/05/19 10:38:58	1.7
@@ -62,6 +62,7 @@
 /* Lock/Unlock commands */
 #define CLVMD_CMD_LOCK_LV           50
 #define CLVMD_CMD_LOCK_VG           51
+#define CLVMD_CMD_LOCK_QUERY	    52
 
 /* Misc functions */
 #define CLVMD_CMD_REFRESH	    40
--- LVM2/daemons/clvmd/clvmd-command.c	2009/04/22 09:39:45	1.27
+++ LVM2/daemons/clvmd/clvmd-command.c	2009/05/19 10:39:00	1.28
@@ -90,6 +90,7 @@
 	int arglen = msglen - sizeof(struct clvm_header) - strlen(msg->node);
 	int status = 0;
 	char *lockname;
+	const char *locktype;
 	struct utsname nodeinfo;
 	unsigned char lock_cmd;
 	unsigned char lock_flags;
@@ -144,6 +145,14 @@
 		}
 		break;
 
+	case CLVMD_CMD_LOCK_QUERY:
+		lockname = &args[2];
+		if (buflen < 3)
+			return EIO;
+		if ((locktype = do_lock_query(lockname)))
+			*retlen = 1 + snprintf(*buf, buflen, "%s", locktype);
+		break;
+
 	case CLVMD_CMD_REFRESH:
 		do_refresh_cache();
 		break;
@@ -278,6 +287,7 @@
 	case CLVMD_CMD_GET_CLUSTERNAME:
 	case CLVMD_CMD_SET_DEBUG:
 	case CLVMD_CMD_VG_BACKUP:
+	case CLVMD_CMD_LOCK_QUERY:
 		break;
 
 	default:
@@ -308,6 +318,7 @@
 
 	case CLVMD_CMD_LOCK_VG:
 	case CLVMD_CMD_VG_BACKUP:
+	case CLVMD_CMD_LOCK_QUERY:
 		/* Nothing to do here */
 		break;
 
--- LVM2/daemons/clvmd/clvmd.c	2009/04/22 10:38:16	1.57
+++ LVM2/daemons/clvmd/clvmd.c	2009/05/19 10:39:00	1.58
@@ -257,6 +257,9 @@
 	case CLVMD_CMD_UNLOCK:
 		command = "UNLOCK";
 		break;
+	case CLVMD_CMD_LOCK_QUERY:
+		command = "LOCK_QUERY";
+		break;
 	default:
 		command = "unknown";
 		break;
--- LVM2/daemons/clvmd/lvm-functions.c	2009/04/21 13:11:28	1.60
+++ LVM2/daemons/clvmd/lvm-functions.c	2009/05/19 10:39:00	1.61
@@ -434,6 +434,26 @@
 	return 0;
 }
 
+const char *do_lock_query(char *resource)
+{
+	int mode;
+	const char *type = NULL;
+
+	mode = get_current_lock(resource);
+	switch (mode) {
+		case LKM_NLMODE: type = "NL"; break;
+		case LKM_CRMODE: type = "CR"; break;
+		case LKM_CWMODE: type = "CW"; break;
+		case LKM_PRMODE: type = "PR"; break;
+		case LKM_PWMODE: type = "PW"; break;
+		case LKM_EXMODE: type = "EX"; break;
+	}
+
+	DEBUGLOG("do_lock_query: resource '%s', mode %i (%s)\n", resource, mode, type ?: "?");
+
+	return type;
+}
+
 /* This is the LOCK_LV part that happens on all nodes in the cluster -
    it is responsible for the interaction with device-mapper and LVM */
 int do_lock_lv(unsigned char command, unsigned char lock_flags, char *resource)
--- LVM2/daemons/clvmd/lvm-functions.h	2009/04/21 13:11:28	1.9
+++ LVM2/daemons/clvmd/lvm-functions.h	2009/05/19 10:39:00	1.10
@@ -22,6 +22,7 @@
 		       char *resource);
 extern int do_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
 		      char *resource);
+extern const char *do_lock_query(char *resource);
 extern int post_lock_lv(unsigned char lock_cmd, unsigned char lock_flags,
 			char *resource);
 extern int do_check_lvm1(const char *vgname);
--- LVM2/lib/activate/activate.c	2009/05/13 21:27:43	1.147
+++ LVM2/lib/activate/activate.c	2009/05/19 10:39:00	1.148
@@ -695,21 +695,7 @@
 	if (!vg_is_clustered(lv->vg))
 		return 0;
 
-	/*
-	 * FIXME: Cluster does not report per-node LV activation status.
-	 * Currently the best we can do is try exclusive local activation.
-	 * If that succeeds, we know the LV is not active elsewhere in the
-	 * cluster.
-	 */
-	if (activate_lv_excl(lv->vg->cmd, lv)) {
-		deactivate_lv(lv->vg->cmd, lv);
-		return 0;
-	}
-
-	/*
-	 * Exclusive local activation failed so assume it is active elsewhere.
-	 */
-	return 1;
+	return remote_lock_held(lv->lvid.s);
 }
 
 /*
--- LVM2/lib/locking/cluster_locking.c	2009/04/22 09:39:46	1.33
+++ LVM2/lib/locking/cluster_locking.c	2009/05/19 10:39:00	1.34
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -33,6 +33,7 @@
 
 #ifndef CLUSTER_LOCKING_INTERNAL
 int lock_resource(struct cmd_context *cmd, const char *resource, uint32_t flags);
+int lock_resource_query(const char *resource, int *mode);
 void locking_end(void);
 int locking_init(int type, struct config_tree *cf, uint32_t *flags);
 #endif
@@ -455,6 +456,69 @@
 	return _lock_for_cluster(clvmd_cmd, flags, lockname);
 }
 
+static int decode_lock_type(const char *response)
+{
+	if (!response)
+		return LCK_NULL;
+	else if (strcmp(response, "EX"))
+		return LCK_EXCL;
+	else if (strcmp(response, "CR"))
+		return LCK_READ;
+	else if (strcmp(response, "PR"))
+		return LCK_PREAD;
+
+	stack;
+	return 0;
+}
+
+#ifdef CLUSTER_LOCKING_INTERNAL
+static int _lock_resource_query(const char *resource, int *mode)
+#else
+int lock_resource_query(const char *resource, int *mode)
+#endif
+{
+	int i, status, len, num_responses, saved_errno;
+	const char *node = "";
+	char *args;
+	lvm_response_t *response = NULL;
+
+	saved_errno = errno;
+	len = strlen(resource) + 3;
+	args = alloca(len);
+	strcpy(args + 2, resource);
+
+	args[0] = 0;
+	args[1] = LCK_CLUSTER_VG;
+
+	status = _cluster_request(CLVMD_CMD_LOCK_QUERY, node, args, len,
+				  &response, &num_responses);
+	*mode = LCK_NULL;
+	for (i = 0; i < num_responses; i++) {
+		if (response[i].status == EHOSTDOWN)
+			continue;
+
+		if (!response[i].response[0])
+			continue;
+
+		/*
+		 * All nodes should use CR, or exactly one node
+		 * should held EX. (PR is obsolete)
+		 * If two nodes node reports different locks,
+		 * something is broken - just return more important mode.
+		 */
+		if (decode_lock_type(response[i].response) > *mode)
+			*mode = decode_lock_type(response[i].response);
+
+		log_debug("Lock held for %s, node %s : %s", resource,
+			  response[i].node, response[i].response);
+	}
+
+	_cluster_free_request(response, num_responses);
+	errno = saved_errno;
+
+	return status;
+}
+
 #ifdef CLUSTER_LOCKING_INTERNAL
 static void _locking_end(void)
 #else
@@ -485,6 +549,7 @@
 int init_cluster_locking(struct locking_type *locking, struct cmd_context *cmd)
 {
 	locking->lock_resource = _lock_resource;
+	locking->lock_resource_query = _lock_resource_query;
 	locking->fin_locking = _locking_end;
 	locking->reset_locking = _reset_locking;
 	locking->flags = LCK_PRE_MEMLOCK | LCK_CLUSTERED;
--- LVM2/lib/locking/locking.c	2009/05/13 13:02:55	1.57
+++ LVM2/lib/locking/locking.c	2009/05/19 10:39:00	1.58
@@ -482,3 +482,21 @@
 	return (_locking.flags & LCK_CLUSTERED) ? 1 : 0;
 }
 
+int remote_lock_held(const char *vol)
+{
+	int mode = LCK_NULL;
+
+	if (!locking_is_clustered())
+		return 0;
+
+	/*
+	 * If an error occured, expect that volume is active
+	 */
+	if (!_locking.lock_resource_query ||
+	    !_locking.lock_resource_query(vol, &mode)) {
+		stack;
+		return 1;
+	}
+
+	return mode == LCK_NULL ? 0 : 1;
+}
--- LVM2/lib/locking/locking.h	2009/04/22 09:39:46	1.45
+++ LVM2/lib/locking/locking.h	2009/05/19 10:39:00	1.46
@@ -25,6 +25,8 @@
 int vg_write_lock_held(void);
 int locking_is_clustered(void);
 
+int remote_lock_held(const char *vol);
+
 /*
  * LCK_VG:
  *   Lock/unlock on-disk volume group data.
--- LVM2/lib/locking/locking_types.h	2007/08/22 14:38:17	1.15
+++ LVM2/lib/locking/locking_types.h	2009/05/19 10:39:00	1.16
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.  
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -18,6 +18,7 @@
 
 typedef int (*lock_resource_fn) (struct cmd_context * cmd, const char *resource,
 				 uint32_t flags);
+typedef int (*lock_resource_query_fn) (const char *resource, int *mode);
 
 typedef void (*fin_lock_fn) (void);
 typedef void (*reset_lock_fn) (void);
@@ -28,6 +29,7 @@
 struct locking_type {
 	uint32_t flags;
 	lock_resource_fn lock_resource;
+	lock_resource_query_fn lock_resource_query;
 
 	reset_lock_fn reset_locking;
 	fin_lock_fn fin_locking;


^ permalink raw reply	[flat|nested] 6+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2007-12-04 15:39 pcaulfield
  0 siblings, 0 replies; 6+ messages in thread
From: pcaulfield @ 2007-12-04 15:39 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	pcaulfield@sourceware.org	2007-12-04 15:39:26

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-command.c lvm-functions.c 
	                 lvm-functions.h 
	lib/locking    : cluster_locking.c 

Log message:
	When we unlock a VG tell the clvmds to see if a backup of the metadata needs
	to be done.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.740&r2=1.741
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.18&r2=1.19
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.c.diff?cvsroot=lvm2&r1=1.35&r2=1.36
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/lvm-functions.h.diff?cvsroot=lvm2&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/locking/cluster_locking.c.diff?cvsroot=lvm2&r1=1.22&r2=1.23

--- LVM2/WHATS_NEW	2007/11/22 13:57:20	1.740
+++ LVM2/WHATS_NEW	2007/12/04 15:39:25	1.741
@@ -1,5 +1,6 @@
 Version 2.02.29 -
 ==================================
+  Make clvmd backup vg metadata on remote nodes.
   Refactor pvmove allocation code.
   Decode cluster locking state in log message.
   Change file locking state messages from debug to very verbose.
--- LVM2/daemons/clvmd/clvm.h	2007/08/17 11:51:23	1.5
+++ LVM2/daemons/clvmd/clvm.h	2007/12/04 15:39:26	1.6
@@ -67,4 +67,5 @@
 #define CLVMD_CMD_REFRESH	    40
 #define CLVMD_CMD_GET_CLUSTERNAME   41
 #define CLVMD_CMD_SET_DEBUG	    42
+#define CLVMD_CMD_VG_BACKUP	    43
 #endif
--- LVM2/daemons/clvmd/clvmd-command.c	2007/08/23 15:43:20	1.18
+++ LVM2/daemons/clvmd/clvmd-command.c	2007/12/04 15:39:26	1.19
@@ -153,6 +153,10 @@
 			*retlen = strlen(*buf)+1;
 		break;
 
+	case CLVMD_CMD_VG_BACKUP:
+		lvm_do_backup(&args[2]);
+		break;
+
 	default:
 		/* Won't get here because command is validated in pre_command */
 		break;
@@ -260,6 +264,7 @@
 	case CLVMD_CMD_REFRESH:
 	case CLVMD_CMD_GET_CLUSTERNAME:
 	case CLVMD_CMD_SET_DEBUG:
+	case CLVMD_CMD_VG_BACKUP:
 		break;
 
 	default:
@@ -289,6 +294,7 @@
 		break;
 
 	case CLVMD_CMD_LOCK_VG:
+	case CLVMD_CMD_VG_BACKUP:
 		/* Nothing to do here */
 		break;
 
--- LVM2/daemons/clvmd/lvm-functions.c	2007/11/14 13:37:51	1.35
+++ LVM2/daemons/clvmd/lvm-functions.c	2007/12/04 15:39:26	1.36
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
  *
  * This file is part of LVM2.
  *
@@ -46,6 +46,7 @@
 #include "log.h"
 #include "activate.h"
 #include "locking.h"
+#include "archiver.h"
 #include "defaults.h"
 
 static struct cmd_context *cmd = NULL;
@@ -550,7 +551,7 @@
 {
 
 	/* Send messages to the normal LVM2 logging system too,
-	   so we get debug output when it's asked for. 
+	   so we get debug output when it's asked for.
  	   We need to NULL the function ptr otherwise it will just call
 	   back into here! */
 	init_log_fn(NULL);
@@ -600,6 +601,21 @@
 	pthread_mutex_init(&lvm_lock, NULL);
 }
 
+/* Backups up the LVM metadata if it's changed */
+void lvm_do_backup(char *vgname)
+{
+	struct volume_group * vg;
+	int consistent;
+
+	DEBUGLOG("Triggering backup of VG metadata for %s\n", vgname);
+
+	vg = vg_read(cmd, vgname, NULL /*vgid*/, &consistent);
+	if (vg)
+		check_current_backup(vg);
+	else
+		log_error("Error backing up metadata, can't find VG for group %s", vgname);
+}
+
 /* Called to initialise the LVM context of the daemon */
 int init_lvm(int using_gulm)
 {
@@ -614,6 +630,9 @@
 	init_debug(cmd->current_settings.debug);
 	init_verbose(cmd->current_settings.verbose + VERBOSE_BASE_LEVEL);
 	set_activation(cmd->current_settings.activation);
+	archive_enable(cmd, cmd->current_settings.archive);
+	backup_enable(cmd, cmd->current_settings.backup);
+	cmd->cmd_line = (char *)"clvmd";
 
 	/* Check lvm.conf is setup for cluster-LVM */
 	check_config();
--- LVM2/daemons/clvmd/lvm-functions.h	2007/08/07 09:06:05	1.5
+++ LVM2/daemons/clvmd/lvm-functions.h	2007/12/04 15:39:26	1.6
@@ -28,7 +28,7 @@
 extern int do_refresh_cache(void);
 extern int init_lvm(int using_gulm);
 extern void init_lvhash(void);
-
+extern void lvm_do_backup(char *vgname);
 extern int hold_unlock(char *resource);
 extern int hold_lock(char *resource, int mode, int flags);
 extern void unlock_all(void);
--- LVM2/lib/locking/cluster_locking.c	2007/11/16 21:16:20	1.22
+++ LVM2/lib/locking/cluster_locking.c	2007/12/04 15:39:26	1.23
@@ -295,7 +295,7 @@
 	return 1;
 }
 
-static int _lock_for_cluster(unsigned char cmd, uint32_t flags, char *name)
+static int _lock_for_cluster(unsigned char cmd, uint32_t flags, const char *name)
 {
 	int status;
 	int i;
@@ -378,6 +378,7 @@
 {
 	char lockname[PATH_MAX];
 	int cluster_cmd = 0;
+	int ret;
 	const char *lock_scope;
 	const char *lock_type = "";
 
@@ -447,7 +448,13 @@
 			 flags);
 
 	/* Send a message to the cluster manager */
-	return _lock_for_cluster(cluster_cmd, flags, lockname);
+	ret = _lock_for_cluster(cluster_cmd, flags, lockname);
+
+	/* If we are unlocking a VG, then trigger remote metadata backups */
+	if (ret && cluster_cmd == CLVMD_CMD_LOCK_VG && ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)) {
+		ret = _lock_for_cluster(CLVMD_CMD_VG_BACKUP, LCK_CLUSTER_VG, resource);
+	}
+	return ret;
 }
 
 #ifdef CLUSTER_LOCKING_INTERNAL


^ permalink raw reply	[flat|nested] 6+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2006-10-09 14:11 pcaulfield
  0 siblings, 0 replies; 6+ messages in thread
From: pcaulfield @ 2006-10-09 14:11 UTC (permalink / raw)
  To: lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	pcaulfield@sourceware.org	2006-10-09 14:11:57

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-cman.c clvmd-command.c 
	                 clvmd-comms.h clvmd-gulm.c clvmd.c 

Log message:
	Add clvmd call to return the cluster name.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.463&r2=1.464
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.3&r2=1.4
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-cman.c.diff?cvsroot=lvm2&r1=1.15&r2=1.16
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-command.c.diff?cvsroot=lvm2&r1=1.10&r2=1.11
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-comms.h.diff?cvsroot=lvm2&r1=1.5&r2=1.6
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-gulm.c.diff?cvsroot=lvm2&r1=1.18&r2=1.19
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd.c.diff?cvsroot=lvm2&r1=1.28&r2=1.29

--- LVM2/WHATS_NEW	2006/10/08 12:01:12	1.463
+++ LVM2/WHATS_NEW	2006/10/09 14:11:57	1.464
@@ -1,5 +1,6 @@
 Version 2.02.11 - 
 =====================================
+  Add clvmd function to return the cluster name. not used by LVM yet.
   Add cling allocation policy.
   Change _check_contiguous() to use _for_each_pv().
   Extend _for_each_pv() to allow termination without error.
--- LVM2/daemons/clvmd/clvm.h	2006/10/04 08:22:16	1.3
+++ LVM2/daemons/clvmd/clvm.h	2006/10/09 14:11:57	1.4
@@ -65,5 +65,6 @@
 
 /* Misc functions */
 #define CLVMD_CMD_REFRESH	    40
+#define CLVMD_CMD_GET_CLUSTERNAME   41
 
 #endif
--- LVM2/daemons/clvmd/clvmd-cman.c	2006/10/06 10:06:10	1.15
+++ LVM2/daemons/clvmd/clvmd-cman.c	2006/10/09 14:11:57	1.16
@@ -471,6 +471,18 @@
 
 }
 
+static int _get_cluster_name(char *buf, int buflen)
+{
+	cman_cluster_t cluster_info;
+	int status;
+
+	status = cman_get_cluster(c_handle, &cluster_info);
+	if (!status) {
+		strncpy(buf, cluster_info.ci_name, buflen);
+	}
+	return status;
+}
+
 static struct cluster_ops _cluster_cman_ops = {
 	.cluster_init_completed   = _cluster_init_completed,
 	.cluster_send_message     = _cluster_send_message,
@@ -484,6 +496,7 @@
 	.get_our_csid             = _get_our_csid,
 	.add_up_node              = _add_up_node,
 	.cluster_closedown        = _cluster_closedown,
+	.get_cluster_name         = _get_cluster_name,
 	.sync_lock                = _sync_lock,
 	.sync_unlock              = _sync_unlock,
 };
--- LVM2/daemons/clvmd/clvmd-command.c	2006/10/05 13:55:50	1.10
+++ LVM2/daemons/clvmd/clvmd-command.c	2006/10/09 14:11:57	1.11
@@ -75,6 +75,8 @@
 #include "clvmd.h"
 #include "libdlm.h"
 
+extern struct cluster_ops *clops;
+
 /* This is where all the real work happens:
    NOTE: client will be NULL when this is executed on a remote node */
 int do_command(struct local_client *client, struct clvm_header *msg, int msglen,
@@ -126,6 +128,12 @@
 		do_refresh_cache();
 		break;
 
+	case CLVMD_CMD_GET_CLUSTERNAME:
+		status = clops->get_cluster_name(*buf, buflen);
+		if (!status)
+			*retlen = strlen(*buf);
+		break;
+
 	default:
 		/* Won't get here because command is validated in pre_command */
 		break;
@@ -227,6 +235,7 @@
 		break;
 
 	case CLVMD_CMD_REFRESH:
+	case CLVMD_CMD_GET_CLUSTERNAME:
 		break;
 
 	default:
--- LVM2/daemons/clvmd/clvmd-comms.h	2006/03/15 08:36:11	1.5
+++ LVM2/daemons/clvmd/clvmd-comms.h	2006/10/09 14:11:57	1.6
@@ -43,6 +43,8 @@
 	void (*reread_config) (void);
 	void (*cluster_closedown) (void);
 
+	int (*get_cluster_name)(char *buf, int buflen);
+
 	int (*sync_lock) (const char *resource, int mode, int flags, int *lockid);
 	int (*sync_unlock) (const char *resource, int lockid);
 
--- LVM2/daemons/clvmd/clvmd-gulm.c	2005/11/10 08:49:29	1.18
+++ LVM2/daemons/clvmd/clvmd-gulm.c	2006/10/09 14:11:57	1.19
@@ -973,6 +973,12 @@
 	return gulm_cluster_send_message(buf, msglen, csid, errtext);
 }
 
+static int _get_cluster_name(char *buf, int buflen)
+{
+	strncpy(buf, cluster_name, buflen);
+	return 0;
+}
+
 static struct cluster_ops _cluster_gulm_ops = {
 	.cluster_init_completed   = NULL,
 	.cluster_send_message     = _cluster_send_message,
@@ -987,6 +993,7 @@
 	.add_up_node              = gulm_add_up_node,
 	.reread_config            = _reread_config,
 	.cluster_closedown        = _cluster_closedown,
+	.get_cluster_name         = _get_cluster_name,
 	.sync_lock                = _sync_lock,
 	.sync_unlock              = _sync_unlock,
 };
--- LVM2/daemons/clvmd/clvmd.c	2006/10/05 13:55:50	1.28
+++ LVM2/daemons/clvmd/clvmd.c	2006/10/09 14:11:57	1.29
@@ -67,7 +67,7 @@
 
 static unsigned short global_xid = 0;	/* Last transaction ID issued */
 
-static struct cluster_ops *clops = NULL;
+struct cluster_ops *clops = NULL;
 
 static char our_csid[MAX_CSID_LEN];
 static unsigned max_csid_len;


^ permalink raw reply	[flat|nested] 6+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ ...
@ 2005-01-21 11:36 pcaulfield
  0 siblings, 0 replies; 6+ messages in thread
From: pcaulfield @ 2005-01-21 11:36 UTC (permalink / raw)
  To: lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	pcaulfield@sourceware.org	2005-01-21 11:35:24

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvm.h clvmd-cman.c clvmd-gulm.c clvmd.c 
	lib/locking    : cluster_locking.c 

Log message:
	Fix clvmd startup bug introduced in cman/gulm amalgamation. bz#145729
	
	Improve reporting of node-specific locking errors so you'll get
something a little more helpful than "host is down" - it will now tell
	you /which/ host it thinks is down.

Patches:
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.164&r2=1.165
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvm.h.diff?cvsroot=lvm2&r1=1.1&r2=1.2
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-cman.c.diff?cvsroot=lvm2&r1=1.8&r2=1.9
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-gulm.c.diff?cvsroot=lvm2&r1=1.6&r2=1.7
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd.c.diff?cvsroot=lvm2&r1=1.12&r2=1.13
http://sources.redhat.com/cgi-bin/cvsweb.cgi/LVM2/lib/locking/cluster_locking.c.diff?cvsroot=lvm2&r1=1.2&r2=1.3


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2011-01-12 20:42 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-01-12 20:42 LVM2 ./WHATS_NEW daemons/clvmd/clvm.h daemons/ agk
  -- strict thread matches above, loose matches on Subject: below --
2010-04-20 14:07 ccaulfield
2009-05-19 10:39 mbroz
2007-12-04 15:39 pcaulfield
2006-10-09 14:11 pcaulfield
2005-01-21 11:36 pcaulfield

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).