public inbox for lvm2-cvs@sourceware.org
From: agk@sourceware.org
To: lvm2-cvs@sourceware.org
Subject: LVM2 ./WHATS_NEW lib/metadata/lv_manip.c tools ...
Date: Mon, 11 Sep 2006 14:25:00 -0000
Message-ID: <20060911142458.9633.qmail@sourceware.org>

CVSROOT:	/cvs/lvm2
Module name:	LVM2
Changes by:	agk@sourceware.org	2006-09-11 14:24:58

Modified files:
	.              : WHATS_NEW
	lib/metadata   : lv_manip.c
	tools          : toollib.c

Log message:
	Fix several incorrect comparisons in parallel area avoidance code.
	Fix segment lengths when flattening existing parallel areas.
	Log existing parallel areas prior to allocation.
	Fix mirror log creation when activation disabled.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/WHATS_NEW.diff?cvsroot=lvm2&r1=1.436&r2=1.437
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/lib/metadata/lv_manip.c.diff?cvsroot=lvm2&r1=1.100&r2=1.101
http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/tools/toollib.c.diff?cvsroot=lvm2&r1=1.87&r2=1.88

--- LVM2/WHATS_NEW	2006/09/07 23:23:45	1.436
+++ LVM2/WHATS_NEW	2006/09/11 14:24:58	1.437
@@ -1,5 +1,9 @@
 Version 2.02.10 -
 ==================================
+  Fix several incorrect comparisons in parallel area avoidance code.
+  Fix segment lengths when flattening existing parallel areas.
+  Log existing parallel areas prior to allocation.
+  Fix mirror log creation when activation disabled.
   Don't attempt automatic recovery without proper locking.
   When using local file locking, skip clustered VGs.
   Add fallback_to_clustered_locking and fallback_to_local_locking parameters.
--- LVM2/lib/metadata/lv_manip.c	2006/08/21 12:54:53	1.100
+++ LVM2/lib/metadata/lv_manip.c	2006/09/11 14:24:58	1.101
@@ -158,7 +158,7 @@
 	}

 	if (seg_lv(seg, s)->status & MIRROR_IMAGE) {
-		lv_reduce(seg_lv(seg, s), area_reduction);
+		lv_reduce(seg_lv(seg, s), area_reduction);
 		return;
 	}

@@ -490,6 +490,49 @@
 	dm_pool_destroy(ah->mem);
 }

+static int _log_parallel_areas(struct dm_pool *mem, struct list *parallel_areas)
+{
+	struct seg_pvs *spvs;
+	struct pv_list *pvl;
+	char *pvnames;
+
+	if (!parallel_areas)
+		return 1;
+
+	if (!dm_pool_begin_object(mem, 256)) {
+		log_error("dm_pool_begin_object failed");
+		return 0;
+	}
+
+	list_iterate_items(spvs, parallel_areas) {
+		list_iterate_items(pvl, &spvs->pvs) {
+			if (!dm_pool_grow_object(mem, dev_name(pvl->pv->dev), strlen(dev_name(pvl->pv->dev)))) {
+				log_error("dm_pool_grow_object failed");
+				dm_pool_abandon_object(mem);
+				return 0;
+			}
+			if (!dm_pool_grow_object(mem, " ", 1)) {
+				log_error("dm_pool_grow_object failed");
+				dm_pool_abandon_object(mem);
+				return 0;
+			}
+		}
+
+		if (!dm_pool_grow_object(mem, "\0", 1)) {
+			log_error("dm_pool_grow_object failed");
+			dm_pool_abandon_object(mem);
+			return 0;
+		}
+
+		pvnames = dm_pool_end_object(mem);
+		log_debug("Parallel PVs at LE %" PRIu32 " length %" PRIu32 ": %s",
+			  spvs->le, spvs->len, pvnames);
+		dm_pool_free(mem, pvnames);
+	}
+
+	return 1;
+}
+
 static int _setup_alloced_segment(struct logical_volume *lv, uint32_t status,
 				  uint32_t area_count,
 				  uint32_t stripe_size,
@@ -711,14 +754,15 @@
 	 * the maximum we can allocate in one go accordingly.
 	 */
 	if (ah->parallel_areas) {
+		next_le = (prev_lvseg ? prev_lvseg->le + prev_lvseg->len : 0) +
+			  *allocated / ah->area_multiple;
 		list_iterate_items(spvs, ah->parallel_areas) {
-			next_le = (prev_lvseg ?
-				   prev_lvseg->le + prev_lvseg->len : 0) + *allocated;
-			if (next_le >= spvs->le) {
-				if (next_le + max_parallel > spvs->le + spvs->len)
-					max_parallel = (spvs->le + spvs->len - next_le) * ah->area_multiple;
-				parallel_pvs = &spvs->pvs;
-				break;
-			}
+			if (next_le >= spvs->le + spvs->len)
+				continue;
+
+			if (max_parallel > (spvs->le + spvs->len) * ah->area_multiple)
+				max_parallel = (spvs->le + spvs->len) * ah->area_multiple;
+			parallel_pvs = &spvs->pvs;
+			break;
 		}
 	}
@@ -760,7 +804,8 @@
 	}

 	/* Is it big enough on its own? */
-	if ((pva->count < max_parallel - *allocated) &&
+	if (pva->count * ah->area_multiple <
+	    max_parallel - *allocated &&
 	    ((!can_split && !ah->log_count) ||
 	     (already_found_one && !(alloc == ALLOC_ANYWHERE))))
@@ -853,6 +898,9 @@
 		return 0;
 	}

+	if (!_log_parallel_areas(ah->mem, ah->parallel_areas))
+		stack;
+
 	areas_size = list_size(pvms);
 	if (areas_size < ah->area_count + ah->log_count) {
 		if (ah->alloc != ALLOC_ANYWHERE) {
@@ -1213,7 +1261,7 @@
 			log_error("Aborting. Failed to extend %s.",
 				  seg_lv(seg, m)->name);
 			return 0;
-		}
+		}
 	}
 	seg->area_len += extents;
 	seg->len += extents;
@@ -1345,7 +1393,7 @@
 	/* Remaining logical length of segment */
 	remaining_seg_len = seg->len - (le - seg->le);

-	if (len > remaining_seg_len)
+	if (remaining_seg_len > len)
 		remaining_seg_len = len;

 	if (spvs->len > remaining_seg_len)
@@ -1426,7 +1474,7 @@

 		/* Find next segment end */
 		/* FIXME Unnecessary nesting! */
-		if (!_for_each_pv(cmd, lv, current_le, lv->le_count, _add_pvs, spvs)) {
+		if (!_for_each_pv(cmd, lv, current_le, lv->le_count - current_le, _add_pvs, spvs)) {
 			stack;
 			return NULL;
 		}
--- LVM2/tools/toollib.c	2006/09/02 01:18:17	1.87
+++ LVM2/tools/toollib.c	2006/09/11 14:24:58	1.88
@@ -1204,7 +1204,6 @@
 	return 1;
 }

-
 /*
  * This function writes a new header to the mirror log header to the lv
  *
@@ -1301,6 +1300,12 @@
 		goto error;
 	}

+	if (!activation() && in_sync) {
+		log_error("Aborting. Unable to create in-sync mirror log "
+			  "while activation is disabled.");
+		goto error;
+	}
+
 	if (!activate_lv(cmd, log_lv)) {
 		log_error("Aborting. Failed to activate mirror log. "
 			  "Remove new LVs and retry.");
@@ -1313,7 +1318,7 @@
 		goto error;
 	}

-	if (!_write_log_header(cmd, log_lv)) {
+	if (activation() && !_write_log_header(cmd, log_lv)) {
 		log_error("Aborting. Failed to write mirror log header. "
 			  "Remove new LV and retry.");
 		goto error;
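For readers following the allocation change, the program below is a minimal standalone sketch of the corrected parallel-area comparison in the lv_manip.c hunk above (the one starting at new line 754). It is not LVM2 source: "struct area" and clamp_max_parallel() are invented stand-ins for struct seg_pvs and the surrounding allocation loop, and extents are treated as plain uint32_t counts.

/*
 * Sketch only: mirrors the corrected comparison from the diff above.
 * Names are simplified stand-ins, not LVM2 identifiers.
 */
#include <inttypes.h>
#include <stdio.h>

struct area {
	uint32_t le;	/* first logical extent covered by the parallel area */
	uint32_t len;	/* number of logical extents it covers */
};

static uint32_t clamp_max_parallel(const struct area *areas, unsigned n,
				   uint32_t next_le, uint32_t area_multiple,
				   uint32_t max_parallel)
{
	for (unsigned i = 0; i < n; i++) {
		/* Skip areas that end at or before the next extent to allocate. */
		if (next_le >= areas[i].le + areas[i].len)
			continue;

		/* Clamp to the end of the first area still ahead of us. */
		if (max_parallel > (areas[i].le + areas[i].len) * area_multiple)
			max_parallel = (areas[i].le + areas[i].len) * area_multiple;
		break;
	}

	return max_parallel;
}

int main(void)
{
	const struct area areas[] = { { 0, 100 }, { 100, 50 } };

	/* Allocating from LE 100: the area covering 0..99 is skipped and the
	 * limit comes from the second area ending at LE 150, so this prints 150. */
	printf("%" PRIu32 "\n", clamp_max_parallel(areas, 2, 100, 1, 1000));

	return 0;
}

The point the sketch illustrates is that each recorded parallel area is now compared against its end (le + len) rather than its start, so areas lying entirely before the next logical extent are skipped instead of being selected and producing a bogus length limit.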