From: zkabelac@sourceware.org
To: lvm-devel@redhat.com, lvm2-cvs@sourceware.org
Subject: LVM2/libdm libdm-deptree.c
Date: Sun, 30 Oct 2011 22:04:00 -0000
Message-ID: <20111030220458.7249.qmail@sourceware.org>

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	zkabelac@sourceware.org	2011-10-30 22:04:57

Modified files:
	libdm          : libdm-deptree.c 

Log message:
	Move the thin pool transaction_id into the segment structure.
	
	Add a new node flag, send_messages, used to simplify the test
	of when to call _node_send_messages().
	
	Add a call to _node_send_messages() for the case where the pool
	is placed deeper in the tree.
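	
	As a toy illustration (this is not libdm code): the rule that
	_node_send_messages() enforces is that the pool's live
	transaction_id must be either equal to the expected id (in
	sync, nothing to send) or exactly one behind it (queued
	messages still pending).  A standalone model of just that
	check, with a hypothetical helper name:
	
		#include <stdio.h>
		#include <stdint.h>
	
		/* Hypothetical helper modelling the check in
		 * _node_send_messages():
		 * 0 = in sync, 1 = send queued messages, -1 = gap. */
		static int may_send_messages(uint64_t current_id,
					     uint64_t expected_id)
		{
			if (current_id == expected_id)
				return 0;  /* In sync - skip messages */
			if (current_id != expected_id - 1)
				return -1; /* Unexpected id - error */
			return 1; /* Replay messages; id catches up */
		}
	
		int main(void)
		{
			printf("%d\n", may_send_messages(5, 5)); /* 0 */
			printf("%d\n", may_send_messages(4, 5)); /* 1 */
			printf("%d\n", may_send_messages(3, 5)); /* -1 */
			return 0;
		}
	
	After this patch the expected id lives in seg->transaction_id
	rather than in the per-node props, and the error case
	additionally tries to deactivate the node's children instead
	of only returning failure.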

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/libdm/libdm-deptree.c.diff?cvsroot=lvm2&r1=1.140&r2=1.141

--- LVM2/libdm/libdm-deptree.c	2011/10/28 20:19:26	1.140
+++ LVM2/libdm/libdm-deptree.c	2011/10/30 22:04:57	1.141
@@ -170,6 +170,7 @@
 	struct dm_tree_node *metadata;	/* Thin_pool */
 	struct dm_tree_node *pool;	/* Thin_pool, Thin */
 	struct dm_list thin_messages;	/* Thin_pool */
+	uint64_t transaction_id;	/* Thin_pool */
 	uint64_t low_water_mark;	/* Thin_pool */
 	uint32_t data_block_size;       /* Thin_pool */
 	unsigned skip_block_zeroing;	/* Thin_pool */
@@ -186,8 +187,6 @@
 	uint32_t read_ahead;
 	uint32_t read_ahead_flags;
 
-	uint64_t thin_pool_transaction_id; /* Thin_pool */
-
 	unsigned segment_count;
 	unsigned size_changed;
 	struct dm_list segs;
@@ -209,6 +208,9 @@
 	 * avoid starting the mirror resync operation too early.
 	 */
 	unsigned delay_resume_if_new;
+
+	/* Send messages for this node in preload */
+	unsigned send_messages;
 };
 
 /* Two of these used to join two nodes with uses and used_by. */
@@ -1335,8 +1337,7 @@
 	uint64_t trans_id;
 	const char *uuid;
 
-	if ((dnode == &dnode->dtree->root) || /* root has props.segs uninitialized */
-	    !dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
+	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
 		return 1;
 
 	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
@@ -1352,22 +1353,28 @@
 	}
 
 	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
-		return_0;
+		goto_bad;
 
-	if (trans_id == dnode->props.thin_pool_transaction_id)
+	if (trans_id == seg->transaction_id)
 		return 1; /* In sync - skip messages */
 
-	if (trans_id != (dnode->props.thin_pool_transaction_id - 1)) {
+	if (trans_id != (seg->transaction_id - 1)) {
 		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
-			  trans_id, dnode->props.thin_pool_transaction_id - 1);
-		return 0; /* Nothing to send */
+			  trans_id, seg->transaction_id - 1);
+		goto bad; /* Nothing to send */
 	}
 
 	dm_list_iterate_items(tmsg, &seg->thin_messages)
 		if (!(_thin_pool_node_message(dnode, tmsg)))
-			return_0;
+			goto_bad;
 
 	return 1;
+bad:
+	/* Try to deactivate */
+	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
+		log_error("Failed to deactivate %s", dnode->name);
+
+	return 0;
 }
 
 /*
@@ -2315,7 +2322,11 @@
 
 		/* Update cached info */
 		child->info = newinfo;
-
+		if (child->props.send_messages &&
+		    !(r = _node_send_messages(child, uuid_prefix, uuid_prefix_len))) {
+			stack;
+			continue;
+		}
 		/*
 		 * Prepare for immediate synchronization with udev and flush all stacked
 		 * dev node operations if requested by immediate_dev_node property. But
@@ -2325,7 +2336,9 @@
 			update_devs_flag = 1;
 	}
 
-	handle = NULL;
+	if (r && dnode->props.send_messages &&
+	    !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
+		stack;
 
 	if (update_devs_flag) {
 		if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
@@ -2875,7 +2888,8 @@
 	if (!_link_tree_nodes(node, seg->pool))
 		return_0;
 
-	node->props.thin_pool_transaction_id = transaction_id; // compare on resume
+	node->props.send_messages = 1;
+	seg->transaction_id = transaction_id;
 	seg->low_water_mark = low_water_mark;
 	seg->data_block_size = data_block_size;
 	seg->skip_block_zeroing = skip_block_zeroing;
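
For context on how a caller drives this path, a hedged sketch follows,
assuming the libdevmapper thin-pool API of this period
(dm_tree_node_add_thin_pool_target, dm_tree_node_add_thin_pool_message,
dm_tree_preload_children); the exact signatures and the UUIDs/geometry
below are illustrative assumptions, not taken from this commit:

	#include <libdevmapper.h>

	/* Sketch only: set up a thin pool with one queued message.
	 * All values marked "example" are made up for illustration. */
	static int activate_pool_sketch(struct dm_tree_node *pool_node)
	{
		/* After this patch: records transaction_id in the
		 * load_segment and sets props.send_messages = 1. */
		if (!dm_tree_node_add_thin_pool_target(pool_node,
				1024 * 2048,	/* example size (sectors) */
				1,		/* expected transaction_id */
				"LVM-example-tmeta-uuid",
				"LVM-example-tdata-uuid",
				128,		/* data_block_size */
				0,		/* low_water_mark */
				0))		/* keep block zeroing */
			return 0;

		/* Queue a message; _node_send_messages() replays it
		 * once the pool runs and the transaction ids line up. */
		if (!dm_tree_node_add_thin_pool_message(pool_node,
				DM_THIN_MESSAGE_CREATE_THIN,
				0 /* device_id */, 0))
			return 0;

		/* With this patch, preload itself now sends the queued
		 * messages even when the pool sits deeper in the tree. */
		return dm_tree_preload_children(pool_node, "LVM-", 4);
	}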

