public inbox for lvm2-cvs@sourceware.org
help / color / mirror / Atom feed
* LVM2 ./WHATS_NEW daemons/clvmd/clvmd-singlenode.c
@ 2012-04-24 12:16 zkabelac
  0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2012-04-24 12:16 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	zkabelac@sourceware.org	2012-04-24 12:16:40

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvmd-singlenode.c 

Log message:
	Update singlenode locking
	
	Support lock conversion
	Work also with LCK_READ
	TODO: do more validation.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.2388&r2=1.2389
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-singlenode.c.diff?cvsroot=lvm2&r1=1.15&r2=1.16

--- LVM2/WHATS_NEW	2012/04/24 12:13:29	1.2388
+++ LVM2/WHATS_NEW	2012/04/24 12:16:40	1.2389
@@ -1,5 +1,6 @@
 Version 2.02.96 - 
 ================================
+  Improve clvmd singlenode locking for better testing.
   Update and correct lvs man page with supported column names.
   Handle replacement of an active device that goes missing with an error device.
   Change raid1 segtype always to request a flush when suspending.
--- LVM2/daemons/clvmd/clvmd-singlenode.c	2011/11/07 17:11:23	1.15
+++ LVM2/daemons/clvmd/clvmd-singlenode.c	2012/04/24 12:16:40	1.16
@@ -28,8 +28,15 @@
 
 static const char SINGLENODE_CLVMD_SOCKNAME[] = DEFAULT_RUN_DIR "/clvmd_singlenode.sock";
 static int listen_fd = -1;
-static int *_locks = NULL;
-static char **_resources = NULL;
+
+static struct dm_hash_table *_locks;
+static int _lockid;
+
+struct lock {
+	int lockid;
+	int mode;
+	int excl;
+};
 
 static void close_comms(void)
 {
@@ -88,9 +95,16 @@
 {
 	int r;
 
+	if (!(_locks = dm_hash_create(128))) {
+		DEBUGLOG("Failed to allocate single-node hash table.\n");
+		return 1;
+	}
+
 	r = init_comms();
-	if (r)
+	if (r) {
+		dm_hash_destroy(_locks);
 		return r;
+	}
 
 	DEBUGLOG("Single-node cluster initialised.\n");
 	return 0;
@@ -102,10 +116,9 @@
 
 	DEBUGLOG("cluster_closedown\n");
 	destroy_lvhash();
-	dm_free(_locks);
-	dm_free(_resources);
+	dm_hash_destroy(_locks);
 	_locks = NULL;
-	_resources = NULL;
+	_lockid = 0;
 }
 
 static void _get_our_csid(char *csid)
@@ -145,7 +158,6 @@
 
 int _lock_file(const char *file, uint32_t flags);
 
-static int _lock_max = 1;
 static pthread_mutex_t _lock_mutex = PTHREAD_MUTEX_INITIALIZER;
 /* Using one common condition for all locks for simplicity */
 static pthread_cond_t _lock_cond = PTHREAD_COND_INITIALIZER;
@@ -153,66 +165,46 @@
 /* Real locking */
 static int _lock_resource(const char *resource, int mode, int flags, int *lockid)
 {
-	int *_locks_1;
-	char **_resources_1;
-	int i, j;
-
-	if (mode == LCK_READ) { /* only track PREAD, aka PROTECTED READ */
-		DEBUGLOG("Not tracking CONCURRENT READ lock: %s, flags=%d, mode=%d\n",
-			 resource, flags, mode);
-		*lockid = -1;
-		return 0;
-	}
+	struct lock *lck;
 
 	DEBUGLOG("Locking resource %s, flags=%d, mode=%d\n",
 		 resource, flags, mode);
 
+	mode &= LCK_TYPE_MASK;
 	pthread_mutex_lock(&_lock_mutex);
 retry:
-
-	/* look for an existing lock for this resource */
-	for (i = 1; i < _lock_max; ++i) {
-		if (!_resources[i])
-			break;
-		if (!strcmp(_resources[i], resource)) {
-			if ((_locks[i] & LCK_TYPE_MASK) == LCK_WRITE ||
-			    (_locks[i] & LCK_TYPE_MASK) == LCK_EXCL) {
-				DEBUGLOG("Resource %s already write/exclusively locked...\n", resource);
-				goto maybe_retry;
-			}
-			if ((mode & LCK_TYPE_MASK) == LCK_WRITE ||
-			    (mode & LCK_TYPE_MASK) == LCK_EXCL) {
-				DEBUGLOG("Resource %s already locked and WRITE/EXCL lock requested...\n",
-					 resource);
-				goto maybe_retry;
-			}
-		}
-	}
-
-	if (i == _lock_max) { /* out of lock slots, extend */
-		if (!(_locks_1 = dm_realloc(_locks, 2 * _lock_max * sizeof(int))))
-			goto_bad;
-
-		_locks = _locks_1;
-		if (!(_resources_1 = dm_realloc(_resources, 2 * _lock_max * sizeof(char *))))
-			/* _locks may get realloc'd twice, but that should be safe */
-			goto_bad;
-
-		_resources = _resources_1;
-		/* clear the new resource entries */
-		for (j = _lock_max; j < 2 * _lock_max; ++j)
-			_resources[j] = NULL;
-		_lock_max = 2 * _lock_max;
-	}
-
-	/* resource is not currently locked, grab it */
-	if (!(_resources[i] = dm_strdup(resource)))
-		goto_bad;
-
-	*lockid = i;
-	_locks[i] = mode;
-
-	DEBUGLOG("Locked resource %s, lockid=%d\n", resource, i);
+	if (!(lck = dm_hash_lookup(_locks, resource))) {
+		/* Add new locked resource */
+		if (!(lck = dm_zalloc(sizeof(struct lock))) ||
+		    !dm_hash_insert(_locks, resource, lck))
+			goto bad;
+
+		lck->lockid = ++_lockid;
+		goto out;
+	}
+
+        /* Update/convert lock */
+	if (flags == LCKF_CONVERT) {
+		if (lck->excl)
+			mode = LCK_EXCL;
+	} else if ((lck->mode == LCK_WRITE) || (lck->mode == LCK_EXCL)) {
+		DEBUGLOG("Resource %s already %s locked (%d)...\n", resource,
+			 (lck->mode == LCK_WRITE) ? "write" : "exclusively", lck->lockid);
+		goto maybe_retry;
+	} else if (lck->mode > mode) {
+		DEBUGLOG("Resource %s already locked and %s lock requested...\n",
+			 resource,
+			 (mode == LCK_READ) ? "READ" :
+			 (mode == LCK_WRITE) ? "WRITE" : "EXCLUSIVE");
+		goto maybe_retry;
+	}
+
+out:
+	*lockid = lck->lockid;
+	lck->mode = mode;
+	lck->excl |= (mode == LCK_EXCL);
+	DEBUGLOG("Locked resource %s, lockid=%d, mode=%d\n", resource, lck->lockid, mode);
+	pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */
 	pthread_mutex_unlock(&_lock_mutex);
 
 	return 0;
@@ -220,6 +212,7 @@
 maybe_retry:
 	if (!(flags & LCK_NONBLOCK)) {
 		pthread_cond_wait(&_lock_cond, &_lock_mutex);
+		DEBUGLOG("Resource %s RETRYING lock...\n", resource);
 		goto retry;
 	}
 bad:
@@ -231,6 +224,8 @@
 
 static int _unlock_resource(const char *resource, int lockid)
 {
+	struct lock *lck;
+
 	if (lockid < 0) {
 		DEBUGLOG("Not tracking unlock of lockid -1: %s, lockid=%d\n",
 			 resource, lockid);
@@ -240,21 +235,21 @@
 	DEBUGLOG("Unlocking resource %s, lockid=%d\n", resource, lockid);
 	pthread_mutex_lock(&_lock_mutex);
 
-	if (!_resources[lockid]) {
+	if (!(lck = dm_hash_lookup(_locks, resource))) {
 		pthread_mutex_unlock(&_lock_mutex);
-		DEBUGLOG("Resource %s, lockid=%d is not locked\n", resource, lockid);
+		DEBUGLOG("Resource %s, lockid=%d is not locked.\n", resource, lockid);
 		return 1;
 	}
 
-	if (strcmp(_resources[lockid], resource)) {
+	if (lck->lockid != lockid) {
 		pthread_mutex_unlock(&_lock_mutex);
-		DEBUGLOG("Resource %d has wrong resource (requested %s, got %s)\n",
-			 lockid, resource, _resources[lockid]);
+		DEBUGLOG("Resource %s has wrong lockid %d, expected %d.\n",
+			 resource, lck->lockid, lockid);
 		return 1;
 	}
 
-	dm_free(_resources[lockid]);
-	_resources[lockid] = 0;
+	dm_hash_remove(_locks, resource);
+	dm_free(lck);
 	pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */
 	pthread_mutex_unlock(&_lock_mutex);
 


^ permalink raw reply	[flat|nested] 4+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvmd-singlenode.c
@ 2011-10-11  9:05 zkabelac
  0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2011-10-11  9:05 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	zkabelac@sourceware.org	2011-10-11 09:05:20

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvmd-singlenode.c 

Log message:
	Use condition instead of sleep
	
	Replace usleep with pthread condition to increase speed testing
	(for simplicity just 1 condition for all locks).
	
	Use thread mutex also for unlock resource (so it wakes up awaiting
	threads)
	
	Better check some error states and return error in fail case with
	unlocked mutex.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.2154&r2=1.2155
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-singlenode.c.diff?cvsroot=lvm2&r1=1.13&r2=1.14

--- LVM2/WHATS_NEW	2011/10/11 08:59:42	1.2154
+++ LVM2/WHATS_NEW	2011/10/11 09:05:20	1.2155
@@ -1,5 +1,6 @@
 Version 2.02.89 - 
 ==================================
+  Use pthread condition for SINGLENODE lock implementation.
   Improve backtrace reporting for some dev_manager_ functions.
   Change message severity to log_warn when symlink creation fails.
   Add ability to convert mirror segtype to RAID1 segtype.
--- LVM2/daemons/clvmd/clvmd-singlenode.c	2011/09/29 08:57:21	1.13
+++ LVM2/daemons/clvmd/clvmd-singlenode.c	2011/10/11 09:05:20	1.14
@@ -147,6 +147,8 @@
 
 static int _lock_max = 1;
 static pthread_mutex_t _lock_mutex = PTHREAD_MUTEX_INITIALIZER;
+/* Using one common condition for all locks for simplicity */
+static pthread_cond_t _lock_cond = PTHREAD_COND_INITIALIZER;
 
 /* Real locking */
 static int _lock_resource(const char *resource, int mode, int flags, int *lockid)
@@ -155,11 +157,11 @@
 	char **_resources_1;
 	int i, j;
 
-	DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n",
+	DEBUGLOG("Locking resource %s, flags=%d, mode=%d\n",
 		 resource, flags, mode);
 
- retry:
 	pthread_mutex_lock(&_lock_mutex);
+retry:
 
 	/* look for an existing lock for this resource */
 	for (i = 1; i < _lock_max; ++i) {
@@ -168,12 +170,12 @@
 		if (!strcmp(_resources[i], resource)) {
 			if ((_locks[i] & LCK_TYPE_MASK) == LCK_WRITE ||
 			    (_locks[i] & LCK_TYPE_MASK) == LCK_EXCL) {
-				DEBUGLOG("%s already write/exclusively locked...\n", resource);
+				DEBUGLOG("Resource %s already write/exclusively locked...\n", resource);
 				goto maybe_retry;
 			}
 			if ((mode & LCK_TYPE_MASK) == LCK_WRITE ||
 			    (mode & LCK_TYPE_MASK) == LCK_EXCL) {
-				DEBUGLOG("%s already locked and WRITE/EXCL lock requested...\n",
+				DEBUGLOG("Resource %s already locked and WRITE/EXCL lock requested...\n",
 					 resource);
 				goto maybe_retry;
 			}
@@ -181,15 +183,14 @@
 	}
 
 	if (i == _lock_max) { /* out of lock slots, extend */
-		_locks_1 = dm_realloc(_locks, 2 * _lock_max * sizeof(int));
-		if (!_locks_1)
-			return 1; /* fail */
+		if (!(_locks_1 = dm_realloc(_locks, 2 * _lock_max * sizeof(int))))
+			goto_bad;
+
 		_locks = _locks_1;
-		_resources_1 = dm_realloc(_resources, 2 * _lock_max * sizeof(char *));
-		if (!_resources_1) {
+		if (!(_resources_1 = dm_realloc(_resources, 2 * _lock_max * sizeof(char *))))
 			/* _locks may get realloc'd twice, but that should be safe */
-			return 1; /* fail */
-		}
+			goto_bad;
+
 		_resources = _resources_1;
 		/* clear the new resource entries */
 		for (j = _lock_max; j < 2 * _lock_max; ++j)
@@ -198,40 +199,52 @@
 	}
 
 	/* resource is not currently locked, grab it */
+	if (!(_resources[i] = dm_strdup(resource)))
+		goto_bad;
 
 	*lockid = i;
 	_locks[i] = mode;
-	_resources[i] = dm_strdup(resource);
-
-	DEBUGLOG("%s locked -> %d\n", resource, i);
 
+	DEBUGLOG("Locked resource %s, lockid=%d\n", resource, i);
 	pthread_mutex_unlock(&_lock_mutex);
+
 	return 0;
- maybe_retry:
-	pthread_mutex_unlock(&_lock_mutex);
+
+maybe_retry:
 	if (!(flags & LCK_NONBLOCK)) {
-		usleep(10000);
+		pthread_cond_wait(&_lock_cond, &_lock_mutex);
 		goto retry;
 	}
+bad:
+	DEBUGLOG("Failed to lock resource %s\n", resource);
+	pthread_mutex_unlock(&_lock_mutex);
 
 	return 1; /* fail */
 }
 
 static int _unlock_resource(const char *resource, int lockid)
 {
-	DEBUGLOG("unlock_resource: %s lockid: %x\n", resource, lockid);
-	if(!_resources[lockid]) {
-		DEBUGLOG("(%s) %d not locked\n", resource, lockid);
+	DEBUGLOG("Unlocking resource %s, lockid=%d\n", resource, lockid);
+	pthread_mutex_lock(&_lock_mutex);
+
+	if (!_resources[lockid]) {
+		pthread_mutex_unlock(&_lock_mutex);
+		DEBUGLOG("Resource %s, lockid=%d is not locked\n", resource, lockid);
 		return 1;
 	}
-	if(strcmp(_resources[lockid], resource)) {
-		DEBUGLOG("%d has wrong resource (requested %s, got %s)\n",
+
+	if (strcmp(_resources[lockid], resource)) {
+		pthread_mutex_unlock(&_lock_mutex);
+		DEBUGLOG("Resource %d has wrong resource (requested %s, got %s)\n",
 			 lockid, resource, _resources[lockid]);
 		return 1;
 	}
 
 	dm_free(_resources[lockid]);
 	_resources[lockid] = 0;
+	pthread_cond_broadcast(&_lock_cond); /* wakeup waiters */
+	pthread_mutex_unlock(&_lock_mutex);
+
 	return 0;
 }
 


^ permalink raw reply	[flat|nested] 4+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvmd-singlenode.c
@ 2011-08-04 12:13 zkabelac
  0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2011-08-04 12:13 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	zkabelac@sourceware.org	2011-08-04 12:13:51

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvmd-singlenode.c 

Log message:
	Add test for fcntl error in singlenode client code.
	
	Static analyzer noticed this check could be handy.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.2048&r2=1.2049
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-singlenode.c.diff?cvsroot=lvm2&r1=1.10&r2=1.11

--- LVM2/WHATS_NEW	2011/08/04 10:14:42	1.2048
+++ LVM2/WHATS_NEW	2011/08/04 12:13:50	1.2049
@@ -1,5 +1,6 @@
 Version 2.02.87 - 
 ===============================
+  Add test for fcntl error in singlenode client code.
   Remove --force option from lvrename manpage.
   Add missing new line in lvrename help text.
   Add basic support for RAID 1/4/5/6 (i.e. create, remove, display)
--- LVM2/daemons/clvmd/clvmd-singlenode.c	2011/03/24 10:45:00	1.10
+++ LVM2/daemons/clvmd/clvmd-singlenode.c	2011/08/04 12:13:51	1.11
@@ -55,7 +55,10 @@
 		goto error;
 	}
 	/* Set Close-on-exec */
-	fcntl(listen_fd, F_SETFD, 1);
+	if (fcntl(listen_fd, F_SETFD, 1)) {
+		DEBUGLOG("Setting CLOEXEC on client fd failed: %s\n", strerror(errno));
+		goto error;
+	}
 
 	memset(&addr, 0, sizeof(addr));
 	memcpy(addr.sun_path, SINGLENODE_CLVMD_SOCKNAME,


^ permalink raw reply	[flat|nested] 4+ messages in thread

* LVM2 ./WHATS_NEW daemons/clvmd/clvmd-singlenode.c
@ 2010-03-26 15:45 snitzer
  0 siblings, 0 replies; 4+ messages in thread
From: snitzer @ 2010-03-26 15:45 UTC (permalink / raw)
  To: lvm-devel, lvm2-cvs

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	snitzer@sourceware.org	2010-03-26 15:45:37

Modified files:
	.              : WHATS_NEW 
	daemons/clvmd  : clvmd-singlenode.c 

Log message:
	Use a real socket for singlenode clvmd to fix clvmd's high cpu load.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.1483&r2=1.1484
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/daemons/clvmd/clvmd-singlenode.c.diff?cvsroot=lvm2&r1=1.1&r2=1.2

--- LVM2/WHATS_NEW	2010/03/26 15:40:13	1.1483
+++ LVM2/WHATS_NEW	2010/03/26 15:45:36	1.1484
@@ -1,5 +1,6 @@
 Version 2.02.63 -  
 ================================
+  Use a real socket for singlenode clvmd to fix clvmd's high cpu load.
   Fix clvmd cluster propagation of dmeventd monitoring mode.
   Allow ALLOC_ANYWHERE to split contiguous areas.
   Use INTERNAL_ERROR for internal errors throughout tree.
--- LVM2/daemons/clvmd/clvmd-singlenode.c	2010/03/18 09:19:31	1.1
+++ LVM2/daemons/clvmd/clvmd-singlenode.c	2010/03/26 15:45:36	1.2
@@ -17,6 +17,7 @@
 
 #include <netinet/in.h>
 #include <sys/un.h>
+#include <sys/socket.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <configure.h>
@@ -31,18 +32,37 @@
 #include "lvm-functions.h"
 #include "clvmd.h"
 
+static const char SINGLENODE_CLVMD_SOCKNAME[] = "\0singlenode_clvmd";
 static int listen_fd = -1;
 
 static int init_comms()
 {
-	listen_fd = open("/dev/null", O_RDWR);
+	struct sockaddr_un addr;
 
-	if (listen_fd < 0)
+	listen_fd = socket(PF_UNIX, SOCK_STREAM, 0);
+	if (listen_fd < 0) {
+		DEBUGLOG("Can't create local socket: %s\n", strerror(errno));
 		return -1;
-
+	}
 	/* Set Close-on-exec */
 	fcntl(listen_fd, F_SETFD, 1);
 
+	memset(&addr, 0, sizeof(addr));
+	memcpy(addr.sun_path, SINGLENODE_CLVMD_SOCKNAME,
+	       sizeof(SINGLENODE_CLVMD_SOCKNAME));
+	addr.sun_family = AF_UNIX;
+
+	if (bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+		DEBUGLOG("Can't bind local socket: %s\n", strerror(errno));
+		close(listen_fd);
+		return -1;
+	}
+	if (listen(listen_fd, 10) < 0) {
+		DEBUGLOG("Can't listen local socket: %s\n", strerror(errno));
+		close(listen_fd);
+		return -1;
+	}
+
 	return 0;
 }
 


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2012-04-24 12:16 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-04-24 12:16 LVM2 ./WHATS_NEW daemons/clvmd/clvmd-singlenode.c zkabelac
  -- strict thread matches above, loose matches on Subject: below --
2011-10-11  9:05 zkabelac
2011-08-04 12:13 zkabelac
2010-03-26 15:45 snitzer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).