public inbox for lvm2-cvs@sourceware.org
* LVM2/lib/metadata metadata.h thin_manip.c
@ 2011-10-19 16:39 zkabelac
0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2011-10-19 16:39 UTC (permalink / raw)
To: lvm-devel, lvm2-cvs
CVSROOT: /cvs/lvm2
Module name: LVM2
Changes by: zkabelac@sourceware.org 2011-10-19 16:39:09
Modified files:
lib/metadata : metadata.h thin_manip.c
Log message:
Simple validation of messages in mda
Check that we do not combine multiple messages for the same LV target,
and switch to 'delete_id' to make it clear what this device_id is used for.
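The new check can be pictured with a minimal standalone sketch (plain C, not LVM2 code; every name below is invented for the example): before queueing a new thin-pool message, the already-queued messages of the same type are scanned, and a second message for the same LV (create/snap/trim) or the same delete_id (delete) is rejected before anything is allocated.

    /* Sketch of the duplicate-message check: refuse to queue an equivalent
     * message twice for the same thin pool. */
    #include <stdio.h>
    #include <string.h>

    enum msg_type { MSG_CREATE_THIN, MSG_CREATE_SNAP, MSG_TRIM, MSG_DELETE };

    struct msg {
        enum msg_type type;
        const char *lv_name;   /* used by create/snap/trim */
        unsigned delete_id;    /* used by delete */
    };

    /* Return 1 if the new message may be queued, 0 if an equivalent one exists. */
    static int may_queue(const struct msg *queued, size_t n, const struct msg *m)
    {
        for (size_t i = 0; i < n; i++) {
            if (queued[i].type != m->type)
                continue;
            if (m->type == MSG_DELETE) {
                if (queued[i].delete_id == m->delete_id) {
                    fprintf(stderr, "Delete of device %u already queued.\n",
                            m->delete_id);
                    return 0;
                }
            } else if (!strcmp(queued[i].lv_name, m->lv_name)) {
                fprintf(stderr, "Message referring LV %s already queued.\n",
                        m->lv_name);
                return 0;
            }
        }
        return 1;
    }

    int main(void)
    {
        struct msg queued[] = {
            { MSG_CREATE_THIN, "thin1", 0 },
            { MSG_DELETE, NULL, 7 },
        };

        /* Duplicate create for "thin1" is rejected; delete of id 8 is allowed. */
        printf("%d\n", may_queue(queued, 2, &(struct msg){ MSG_CREATE_THIN, "thin1", 0 }));
        printf("%d\n", may_queue(queued, 2, &(struct msg){ MSG_DELETE, NULL, 8 }));
        return 0;
    }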
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata.h.diff?cvsroot=lvm2&r1=1.258&r2=1.259
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/thin_manip.c.diff?cvsroot=lvm2&r1=1.10&r2=1.11
--- LVM2/lib/metadata/metadata.h 2011/10/17 14:17:09 1.258
+++ LVM2/lib/metadata/metadata.h 2011/10/19 16:39:09 1.259
@@ -455,7 +455,7 @@
int attach_pool_lv(struct lv_segment *seg, struct logical_volume *pool_lv);
int detach_pool_lv(struct lv_segment *seg);
int attach_pool_message(struct lv_segment *seg, dm_thin_message_t type,
- struct logical_volume *lv, uint32_t device_id,
+ struct logical_volume *lv, uint32_t delete_id,
int read_only);
int detach_pool_messages(struct lv_segment *seg);
--- LVM2/lib/metadata/thin_manip.c 2011/10/19 16:37:30 1.10
+++ LVM2/lib/metadata/thin_manip.c 2011/10/19 16:39:09 1.11
@@ -81,11 +81,36 @@
}
int attach_pool_message(struct lv_segment *seg, dm_thin_message_t type,
- struct logical_volume *lv, uint32_t device_id,
+ struct logical_volume *lv, uint32_t delete_id,
int read_only)
{
struct lv_thin_message *tmsg;
+ dm_list_iterate_items(tmsg, &seg->thin_messages) {
+ if (tmsg->type == type) {
+ switch (tmsg->type) {
+ case DM_THIN_MESSAGE_CREATE_SNAP:
+ case DM_THIN_MESSAGE_CREATE_THIN:
+ case DM_THIN_MESSAGE_TRIM:
+ if (tmsg->u.lv == lv) {
+ log_error("Message referring LV %s already queued for %s.",
+ tmsg->u.lv->name, seg->lv->name);
+ return 0;
+ }
+ break;
+ case DM_THIN_MESSAGE_DELETE:
+ if (tmsg->u.delete_id == delete_id) {
+ log_error("Delete of device %u already queued for %s.",
+ tmsg->u.delete_id, seg->lv->name);
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
if (!(tmsg = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*tmsg)))) {
log_error("Failed to allocate memory for message.");
return 0;
@@ -98,7 +123,7 @@
tmsg->u.lv = lv;
break;
case DM_THIN_MESSAGE_DELETE:
- tmsg->u.delete_id = device_id;
+ tmsg->u.delete_id = delete_id;
break;
default:
log_error(INTERNAL_ERROR "Unsupported message type %d", type);
* LVM2/lib/metadata metadata.h thin_manip.c
@ 2012-01-25 8:55 zkabelac
0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2012-01-25 8:55 UTC (permalink / raw)
To: lvm-devel, lvm2-cvs
CVSROOT: /cvs/lvm2
Module name: LVM2
Changes by: zkabelac@sourceware.org 2012-01-25 08:55:20
Modified files:
lib/metadata : metadata.h thin_manip.c
Log message:
Thin: fix transaction_id incrementation and refactor code
Add pool_has_message() and use it in attach_pool_message().
Also update the header to make it more obvious which segment type is
expected as a parameter.
Rename 'read_only' to 'no_update' (no automatic transaction_id update)
to better match how it is used.
Fix a problem where replacing the only stacked message with a delete
message caused an unwanted transaction_id increase.
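The transaction_id rule can be summarized with a small standalone sketch (plain C, not LVM2 code; the struct and numbers are illustrative): the id is bumped only when the first message lands on an empty queue and the caller did not pass no_update, which is exactly the case where an already-queued create message is being replaced by a delete.

    #include <stdio.h>

    struct pool {
        unsigned transaction_id;
        unsigned queued;          /* number of queued messages */
    };

    static void attach_message(struct pool *p, int no_update)
    {
        /* Bump transaction_id only for the first message on an empty queue,
         * and only when the caller did not request no_update. */
        if (!no_update && p->queued == 0)
            p->transaction_id++;
        p->queued++;
    }

    int main(void)
    {
        struct pool p = { .transaction_id = 1, .queued = 0 };

        /* lvcreate queues a create message: id goes 1 -> 2 */
        attach_message(&p, 0);
        printf("after create:  txn=%u queued=%u\n", p.transaction_id, p.queued);

        /* Removing the same not-yet-created LV discards the pending create
         * and queues a delete with no_update=1, so the id stays at 2. */
        p.queued--;
        attach_message(&p, 1);
        printf("after replace: txn=%u queued=%u\n", p.transaction_id, p.queued);
        return 0;
    }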
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata.h.diff?cvsroot=lvm2&r1=1.264&r2=1.265
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/thin_manip.c.diff?cvsroot=lvm2&r1=1.29&r2=1.30
--- LVM2/lib/metadata/metadata.h 2011/11/07 11:03:47 1.264
+++ LVM2/lib/metadata/metadata.h 2012/01/25 08:55:19 1.265
@@ -452,16 +452,18 @@
/*
* From thin_manip.c
*/
-int attach_pool_metadata_lv(struct lv_segment *seg,
+int attach_pool_metadata_lv(struct lv_segment *pool_seg,
struct logical_volume *pool_metadata_lv);
-int attach_pool_data_lv(struct lv_segment *seg,
+int attach_pool_data_lv(struct lv_segment *pool_seg,
struct logical_volume *pool_data_lv);
int attach_pool_lv(struct lv_segment *seg, struct logical_volume *pool_lv,
struct logical_volume *origin_lv);
int detach_pool_lv(struct lv_segment *seg);
-int attach_pool_message(struct lv_segment *seg, dm_thin_message_t type,
+int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
struct logical_volume *lv, uint32_t delete_id,
- int read_only);
+ int auto_increment);
+int pool_has_message(const struct lv_segment *seg,
+ const struct logical_volume *lv, uint32_t device_id);
int extend_pool(struct logical_volume *lv, const struct segment_type *segtype,
struct alloc_handle *ah, uint32_t stripes, uint32_t stripe_size);
--- LVM2/lib/metadata/thin_manip.c 2012/01/23 17:46:31 1.29
+++ LVM2/lib/metadata/thin_manip.c 2012/01/25 08:55:19 1.30
@@ -20,18 +20,18 @@
#include "lv_alloc.h"
#include "archiver.h"
-int attach_pool_metadata_lv(struct lv_segment *seg, struct logical_volume *metadata_lv)
+int attach_pool_metadata_lv(struct lv_segment *pool_seg, struct logical_volume *metadata_lv)
{
- seg->metadata_lv = metadata_lv;
+ pool_seg->metadata_lv = metadata_lv;
metadata_lv->status |= THIN_POOL_METADATA;
lv_set_hidden(metadata_lv);
- return add_seg_to_segs_using_this_lv(metadata_lv, seg);
+ return add_seg_to_segs_using_this_lv(metadata_lv, pool_seg);
}
-int attach_pool_data_lv(struct lv_segment *seg, struct logical_volume *pool_data_lv)
+int attach_pool_data_lv(struct lv_segment *pool_seg, struct logical_volume *pool_data_lv)
{
- if (!set_lv_segment_area_lv(seg, 0, pool_data_lv, 0, THIN_POOL_DATA))
+ if (!set_lv_segment_area_lv(pool_seg, 0, pool_data_lv, 0, THIN_POOL_DATA))
return_0;
lv_set_hidden(pool_data_lv);
@@ -56,6 +56,7 @@
{
struct lv_thin_message *tmsg, *tmp;
struct seg_list *sl, *tsl;
+ int no_update = 0;
if (!seg->pool_lv || !lv_is_thin_pool(seg->pool_lv)) {
log_error(INTERNAL_ERROR "LV %s is not a thin volume",
@@ -64,15 +65,16 @@
}
/* Drop any message referencing removed segment */
- dm_list_iterate_items_safe(tmsg, tmp, &first_seg(seg->pool_lv)->thin_messages) {
+ dm_list_iterate_items_safe(tmsg, tmp, &(first_seg(seg->pool_lv)->thin_messages)) {
switch (tmsg->type) {
case DM_THIN_MESSAGE_CREATE_SNAP:
case DM_THIN_MESSAGE_CREATE_THIN:
case DM_THIN_MESSAGE_TRIM:
- if (first_seg(tmsg->u.lv) == seg) {
+ if (tmsg->u.lv == seg->lv) {
log_debug("Discarding message for LV %s.",
tmsg->u.lv->name);
dm_list_del(&tmsg->list);
+ no_update = 1; /* Replacing existing */
}
break;
case DM_THIN_MESSAGE_DELETE:
@@ -90,7 +92,7 @@
if (!attach_pool_message(first_seg(seg->pool_lv),
DM_THIN_MESSAGE_DELETE,
- NULL, seg->device_id, 0))
+ NULL, seg->device_id, no_update))
return_0;
if (!remove_seg_from_segs_using_this_lv(seg->pool_lv, seg))
@@ -119,40 +121,28 @@
return 1;
}
-int attach_pool_message(struct lv_segment *seg, dm_thin_message_t type,
+int attach_pool_message(struct lv_segment *pool_seg, dm_thin_message_t type,
struct logical_volume *lv, uint32_t delete_id,
- int read_only)
+ int no_update)
{
struct lv_thin_message *tmsg;
- dm_list_iterate_items(tmsg, &seg->thin_messages) {
- if (tmsg->type != type)
- continue;
+ if (!seg_is_thin_pool(pool_seg)) {
+ log_error(INTERNAL_ERROR "LV %s is not pool.", pool_seg->lv->name);
+ return 0;
+ }
- switch (tmsg->type) {
- case DM_THIN_MESSAGE_CREATE_SNAP:
- case DM_THIN_MESSAGE_CREATE_THIN:
- case DM_THIN_MESSAGE_TRIM:
- if (tmsg->u.lv == lv) {
- log_error("Message referring LV %s already queued for %s.",
- tmsg->u.lv->name, seg->lv->name);
- return 0;
- }
- break;
- case DM_THIN_MESSAGE_DELETE:
- if (tmsg->u.delete_id == delete_id) {
- log_error("Delete of device %u already queued for %s.",
- tmsg->u.delete_id, seg->lv->name);
- return 0;
- }
- break;
- default:
- log_error(INTERNAL_ERROR "Unsupported message type %u.", tmsg->type);
- return 0;
- }
+ if (pool_has_message(pool_seg, lv, delete_id)) {
+ if (lv)
+ log_error("Message referring LV %s already queued in pool %s.",
+ lv->name, pool_seg->lv->name);
+ else
+ log_error("Delete for device %u already queued in pool %s.",
+ delete_id, pool_seg->lv->name);
+ return 0;
}
- if (!(tmsg = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*tmsg)))) {
+ if (!(tmsg = dm_pool_alloc(pool_seg->lv->vg->vgmem, sizeof(*tmsg)))) {
log_error("Failed to allocate memory for message.");
return 0;
}
@@ -174,10 +164,10 @@
tmsg->type = type;
/* If the 1st message is added in non-read-only mode, modify transaction_id */
- if (!read_only && dm_list_empty(&seg->thin_messages))
- seg->transaction_id++;
+ if (!no_update && dm_list_empty(&pool_seg->thin_messages))
+ pool_seg->transaction_id++;
- dm_list_add(&seg->thin_messages, &tmsg->list);
+ dm_list_add(&pool_seg->thin_messages, &tmsg->list);
log_debug("Added %s message",
(type == DM_THIN_MESSAGE_CREATE_SNAP ||
@@ -188,6 +178,43 @@
return 1;
}
+/*
+ * Check whether pool has some message queued for LV or for device_id
+ * When LV is NULL and device_id is 0 it just checks for any message.
+ */
+int pool_has_message(const struct lv_segment *seg,
+ const struct logical_volume *lv, uint32_t device_id)
+{
+ const struct lv_thin_message *tmsg;
+
+ if (!seg_is_thin_pool(seg)) {
+ log_error(INTERNAL_ERROR "LV %s is not pool.", seg->lv->name);
+ return 0;
+ }
+
+ if (!lv && !device_id)
+ return !dm_list_empty(&seg->thin_messages);
+
+ dm_list_iterate_items(tmsg, &seg->thin_messages) {
+ switch (tmsg->type) {
+ case DM_THIN_MESSAGE_CREATE_SNAP:
+ case DM_THIN_MESSAGE_CREATE_THIN:
+ case DM_THIN_MESSAGE_TRIM:
+ if (tmsg->u.lv == lv)
+ return 1;
+ break;
+ case DM_THIN_MESSAGE_DELETE:
+ if (tmsg->u.delete_id == device_id)
+ return 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
struct lv_segment *find_pool_seg(const struct lv_segment *seg)
{
struct lv_segment *pool_seg;
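The query modes of the new helper are easy to illustrate with a standalone sketch (plain C, not LVM2 code; all names are invented): a non-NULL LV asks about queued create/snap/trim messages, a non-zero device_id asks about a queued delete, and passing neither asks whether anything is queued at all.

    #include <stdio.h>
    #include <string.h>

    enum msg_type { MSG_CREATE, MSG_SNAP, MSG_TRIM, MSG_DELETE };

    struct msg {
        enum msg_type type;
        const char *lv_name;   /* create/snap/trim */
        unsigned delete_id;    /* delete */
    };

    static int has_message(const struct msg *msgs, size_t n,
                           const char *lv_name, unsigned device_id)
    {
        if (!lv_name && !device_id)
            return n > 0;   /* any message queued at all? */

        for (size_t i = 0; i < n; i++) {
            if (msgs[i].type == MSG_DELETE) {
                if (device_id && msgs[i].delete_id == device_id)
                    return 1;
            } else if (lv_name && !strcmp(msgs[i].lv_name, lv_name))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct msg queued[] = { { MSG_CREATE, "thin1", 0 }, { MSG_DELETE, NULL, 9 } };

        printf("message for thin1? %d\n", has_message(queued, 2, "thin1", 0));
        printf("delete of id 9?    %d\n", has_message(queued, 2, NULL, 9));
        printf("anything queued?   %d\n", has_message(queued, 2, NULL, 0));
        return 0;
    }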
* LVM2/lib/metadata metadata.h thin_manip.c
@ 2011-10-22 16:44 zkabelac
0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2011-10-22 16:44 UTC (permalink / raw)
To: lvm-devel, lvm2-cvs
CVSROOT: /cvs/lvm2
Module name: LVM2
Changes by: zkabelac@sourceware.org 2011-10-22 16:44:23
Modified files:
lib/metadata : metadata.h thin_manip.c
Log message:
Recoded the way a thin pool is inserted into a VG
Code in _lv_insert_empty_sublvs was not able to provide the proper
initialization order for a thin pool LV.
The new function extend_pool() first adds a metadata segment to the pool LV,
which is still visible. This LV is activated and cleared.
Then a new meta LV is created and the metadata segments are moved there.
Now the preallocated pool data segment is attached to the pool LV
and the _tpool layer is created. Finally the segment is marked as thin_pool.
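The ordering is the point of the patch; a stub-only standalone sketch (plain C, not LVM2 code, with every step reduced to a named placeholder) of the sequence described above, using the same early-return-on-failure style:

    #include <stdio.h>

    static int step(const char *what) { printf("ok: %s\n", what); return 1; }

    static int extend_pool_sketch(const char *pool_name)
    {
        /* Each step stands in for the corresponding LVM2 call; the order
         * mirrors the description in the log message. */
        if (!step("add metadata segment to the still-visible pool LV") ||
            !step("activate the pool LV, clear the metadata start, deactivate") ||
            !step("create <pool>_tmeta and move the metadata segments there") ||
            !step("add the preallocated data segment to the pool LV") ||
            !step("insert the _tpool layer and mark the segment as thin_pool") ||
            !step("attach the metadata LV and the data LV to the pool segment"))
            return 0;

        printf("pool %s initialized\n", pool_name);
        return 1;
    }

    int main(void) { return extend_pool_sketch("vg/pool") ? 0 : 1; }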
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata.h.diff?cvsroot=lvm2&r1=1.260&r2=1.261
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/thin_manip.c.diff?cvsroot=lvm2&r1=1.12&r2=1.13
--- LVM2/lib/metadata/metadata.h 2011/10/22 16:42:11 1.260
+++ LVM2/lib/metadata/metadata.h 2011/10/22 16:44:23 1.261
@@ -69,6 +69,7 @@
struct dm_config_tree;
struct metadata_area;
+struct alloc_handle;
/* Per-format per-metadata area operations */
struct metadata_area_ops {
@@ -461,6 +462,8 @@
struct logical_volume *lv, uint32_t delete_id,
int read_only);
int detach_pool_messages(struct lv_segment *seg);
+int extend_pool(struct logical_volume *lv, const struct segment_type *segtype,
+ struct alloc_handle *ah);
/*
* Begin skeleton for external LVM library
--- LVM2/lib/metadata/thin_manip.c 2011/10/19 16:42:14 1.12
+++ LVM2/lib/metadata/thin_manip.c 2011/10/22 16:44:23 1.13
@@ -13,6 +13,8 @@
*/
#include "lib.h"
+#include "activate.h"
+#include "locking.h"
#include "metadata.h"
#include "segtype.h"
#include "lv_alloc.h"
@@ -216,3 +218,96 @@
return max_id;
}
+
+int extend_pool(struct logical_volume *pool_lv, const struct segment_type *segtype,
+ struct alloc_handle *ah)
+{
+ const struct segment_type *striped;
+ struct logical_volume *meta_lv, *data_lv;
+ struct lv_segment *seg;
+ const size_t len = strlen(pool_lv->name) + 16;
+ char name[len];
+
+ if (lv_is_thin_pool(pool_lv)) {
+ log_error("Resize of pool %s not yet implemented.", pool_lv->name);
+ return 0;
+ }
+
+ /* LV is not yet a pool, so it's an extension from lvcreate */
+ if (!(striped = get_segtype_from_string(pool_lv->vg->cmd, "striped")))
+ return_0;
+
+ if (activation() && segtype->ops->target_present &&
+ !segtype->ops->target_present(pool_lv->vg->cmd, NULL, NULL)) {
+ log_error("%s: Required device-mapper target(s) not "
+ "detected in your kernel.", segtype->name);
+ return 0;
+ }
+
+ /* Metadata segment */
+ if (!lv_add_segment(ah, 1, 1, pool_lv, striped, 1, 0, 0))
+ return_0;
+
+ if (activation()) {
+ if (!vg_write(pool_lv->vg) || !vg_commit(pool_lv->vg))
+ return_0;
+
+ /*
+ * If killed here, only the VISIBLE striped pool LV is left
+ * and user could easily remove it.
+ *
+ * FIXME: implement lazy clearing when activation is disabled
+ */
+
+ // FIXME: activate_lv_local_excl is actually wanted here
+ if (!activate_lv_local(pool_lv->vg->cmd, pool_lv) ||
+ // FIXME: maybe -zero n should allow recreating the same thin pool
+ // and a different option should be used for zero_new_blocks
+ /* Clear 4KB of metadata device for new thin-pool. */
+ !set_lv(pool_lv->vg->cmd, pool_lv, UINT64_C(0), 0)) {
+ log_error("Aborting. Failed to wipe pool metadata %s.",
+ pool_lv->name);
+ return 0;
+ }
+
+ if (!deactivate_lv_local(pool_lv->vg->cmd, pool_lv)) {
+ log_error("Aborting. Could not deactivate pool metadata %s.",
+ pool_lv->name);
+ return 0;
+ }
+ } else {
+ log_error("Pool %s created without initilization.", pool_lv->name);
+ }
+
+ if (dm_snprintf(name, len, "%s_tmeta", pool_lv->name) < 0)
+ return_0;
+
+ if (!(meta_lv = lv_create_empty(name, NULL, LVM_READ | LVM_WRITE,
+ ALLOC_INHERIT, pool_lv->vg)))
+ return_0;
+
+ if (!move_lv_segments(meta_lv, pool_lv, 0, 0))
+ return_0;
+
+ /* Pool data segment */
+ if (!lv_add_segment(ah, 0, 1, pool_lv, striped, 1, 0, 0))
+ return_0;
+
+ if (!(data_lv = insert_layer_for_lv(pool_lv->vg->cmd, pool_lv,
+ pool_lv->status, "_tpool")))
+ return_0;
+
+ seg = first_seg(pool_lv);
+ seg->segtype = segtype; /* Set as thin_pool segment */
+ seg->lv->status |= THIN_POOL;
+
+ if (!attach_pool_metadata_lv(seg, meta_lv))
+ return_0;
+
+ /* Drop reference as attach_pool_data_lv() takes it again */
+ remove_seg_from_segs_using_this_lv(data_lv, seg);
+ if (!attach_pool_data_lv(seg, data_lv))
+ return_0;
+
+ return 1;
+}
* LVM2/lib/metadata metadata.h thin_manip.c
@ 2011-10-03 18:39 zkabelac
0 siblings, 0 replies; 4+ messages in thread
From: zkabelac @ 2011-10-03 18:39 UTC (permalink / raw)
To: lvm-devel, lvm2-cvs
CVSROOT: /cvs/lvm2
Module name: LVM2
Changes by: zkabelac@sourceware.org 2011-10-03 18:39:18
Modified files:
lib/metadata : metadata.h thin_manip.c
Log message:
Add a simple function for looking up a free device_id
Initial simple implementation for finding an unused device_id.
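The search itself is easy to picture with a standalone sketch (plain C, not LVM2 code; the 24-bit limit mirrors the constant used in the patch): scan the device_ids already in use, take the maximum plus one, and give up once the id space is exhausted. Holes below the maximum are ignored by this naive search.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_DEVICE_ID ((uint32_t)1 << 24)   /* 24-bit id space, as in the patch */

    static uint32_t get_free_device_id(const uint32_t *used, size_t n)
    {
        uint32_t max_id = 0;

        for (size_t i = 0; i < n; i++)
            if (used[i] > max_id)
                max_id = used[i];

        if (++max_id >= MAX_DEVICE_ID) {
            fprintf(stderr, "Free device_id exhausted.\n");
            return 0;   /* 0 doubles as the error value, as in the patch */
        }

        return max_id;
    }

    int main(void)
    {
        uint32_t used[] = { 1, 2, 5 };

        /* Holes (3, 4) are skipped; the naive search returns 6. */
        printf("next free device_id: %u\n", get_free_device_id(used, 3));
        return 0;
    }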
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/metadata.h.diff?cvsroot=lvm2&r1=1.256&r2=1.257
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/thin_manip.c.diff?cvsroot=lvm2&r1=1.4&r2=1.5
--- LVM2/lib/metadata/metadata.h 2011/09/09 01:15:18 1.256
+++ LVM2/lib/metadata/metadata.h 2011/10/03 18:39:17 1.257
@@ -374,6 +374,9 @@
/* Find pool LV segment given a thin pool data or metadata segment. */
struct lv_segment *find_pool_seg(const struct lv_segment *seg);
+/* Find some unused device_id for thin pool LV segment. */
+uint32_t get_free_pool_device_id(struct lv_segment *thin_pool_seg);
+
/*
* Remove a dev_dir if present.
*/
--- LVM2/lib/metadata/thin_manip.c 2011/09/09 01:15:18 1.4
+++ LVM2/lib/metadata/thin_manip.c 2011/10/03 18:39:17 1.5
@@ -80,3 +80,40 @@
return pool_seg;
}
+
+/*
+ * Find a free device_id for the given thin_pool segment.
+ *
+ * \return
+ * Free device id, or 0 if free device_id is not found.
+ *
+ * FIXME: Improve naive search and keep the value cached
+ * and updated during VG lifetime (so no const for lv_segment)
+ */
+uint32_t get_free_pool_device_id(struct lv_segment *thin_pool_seg)
+{
+ uint32_t dev_id, max_id = 0;
+ struct dm_list *h;
+
+ if (!seg_is_thin_pool(thin_pool_seg)) {
+ log_error("Segment in %s is not a thin pool segment.",
+ thin_pool_seg->lv->name);
+ return 0;
+ }
+
+ dm_list_iterate(h, &thin_pool_seg->lv->segs_using_this_lv) {
+ dev_id = dm_list_item(h, struct seg_list)->seg->device_id;
+ if (dev_id > max_id)
+ max_id = dev_id;
+ }
+
+ if (++max_id >= (1 << 24)) {
+ // FIXME: try to find empty holes....
+ log_error("Free device_id exhausted...");
+ return 0;
+ }
+
+ log_debug("Found free pool device_id %u.", max_id);
+
+ return max_id;
+}