| author | Ingo Molnar <mingo@kernel.org> | 2012-04-14 13:18:27 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2012-04-14 13:19:04 +0200 |
| commit | 6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch) | |
| tree | 021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /drivers/md/persistent-data/dm-btree-remove.c | |
| parent | 682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff) | |
| parent | a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff) | |
Merge branch 'perf/core' into perf/uprobes
Merge in the latest upstream (and the latest perf development tree) to prepare
for tooling changes, and to pick up the v3.4 MM changes that the uprobes
code needs to handle.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/md/persistent-data/dm-btree-remove.c')
| -rw-r--r-- | drivers/md/persistent-data/dm-btree-remove.c | 202 |
|---|---|---|
1 file changed, 113 insertions, 89 deletions
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 023fbc2d389e..aa71e2359a07 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -61,20 +61,20 @@ static void node_shift(struct node *n, int shift)
 	if (shift < 0) {
 		shift = -shift;
 		BUG_ON(shift > nr_entries);
-		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift, value_size));
+		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
 		memmove(key_ptr(n, 0),
 			key_ptr(n, shift),
 			(nr_entries - shift) * sizeof(__le64));
-		memmove(value_ptr(n, 0, value_size),
-			value_ptr(n, shift, value_size),
+		memmove(value_ptr(n, 0),
+			value_ptr(n, shift),
 			(nr_entries - shift) * value_size);
 	} else {
 		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
 		memmove(key_ptr(n, shift),
 			key_ptr(n, 0),
 			nr_entries * sizeof(__le64));
-		memmove(value_ptr(n, shift, value_size),
-			value_ptr(n, 0, value_size),
+		memmove(value_ptr(n, shift),
+			value_ptr(n, 0),
 			nr_entries * value_size);
 	}
 }
@@ -91,16 +91,16 @@ static void node_copy(struct node *left, struct node *right, int shift)
 		memcpy(key_ptr(left, nr_left),
 		       key_ptr(right, 0),
 		       shift * sizeof(__le64));
-		memcpy(value_ptr(left, nr_left, value_size),
-		       value_ptr(right, 0, value_size),
+		memcpy(value_ptr(left, nr_left),
+		       value_ptr(right, 0),
 		       shift * value_size);
 	} else {
 		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
 		memcpy(key_ptr(right, 0),
 		       key_ptr(left, nr_left - shift),
 		       shift * sizeof(__le64));
-		memcpy(value_ptr(right, 0, value_size),
-		       value_ptr(left, nr_left - shift, value_size),
+		memcpy(value_ptr(right, 0),
+		       value_ptr(left, nr_left - shift),
 		       shift * value_size);
 	}
 }
@@ -120,26 +120,17 @@ static void delete_at(struct node *n, unsigned index)
 			key_ptr(n, index + 1),
 			nr_to_copy * sizeof(__le64));
 
-		memmove(value_ptr(n, index, value_size),
-			value_ptr(n, index + 1, value_size),
+		memmove(value_ptr(n, index),
+			value_ptr(n, index + 1),
 			nr_to_copy * value_size);
 	}
 
 	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
 }
 
-static unsigned del_threshold(struct node *n)
-{
-	return le32_to_cpu(n->header.max_entries) / 3;
-}
-
 static unsigned merge_threshold(struct node *n)
 {
-	/*
-	 * The extra one is because we know we're potentially going to
-	 * delete an entry.
-	 */
-	return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+	return le32_to_cpu(n->header.max_entries) / 3;
 }
 
 struct child {
@@ -175,7 +166,7 @@ static int init_child(struct dm_btree_info *info, struct node *parent,
 	if (inc)
 		inc_children(info->tm, result->n, &le64_type);
 
-	*((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
+	*((__le64 *) value_ptr(parent, index)) =
 		cpu_to_le64(dm_block_location(result->block));
 
 	return 0;
@@ -188,6 +179,15 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
 
 static void shift(struct node *left, struct node *right, int count)
 {
+	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
+
+	BUG_ON(max_entries != r_max_entries);
+	BUG_ON(nr_left - count > max_entries);
+	BUG_ON(nr_right + count > max_entries);
+
 	if (!count)
 		return;
 
@@ -199,13 +199,8 @@ static void shift(struct node *left, struct node *right, int count)
 		node_shift(right, count);
 	}
 
-	left->header.nr_entries =
-		cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
-	BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));
-
-	right->header.nr_entries =
-		cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
-	BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
+	left->header.nr_entries = cpu_to_le32(nr_left - count);
+	right->header.nr_entries = cpu_to_le32(nr_right + count);
 }
 
 static void __rebalance2(struct dm_btree_info *info, struct node *parent,
@@ -215,8 +210,9 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
 	struct node *right = r->n;
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+	unsigned threshold = 2 * merge_threshold(left) + 1;
 
-	if (nr_left + nr_right <= merge_threshold(left)) {
+	if (nr_left + nr_right < threshold) {
 		/*
 		 * Merge
 		 */
@@ -234,9 +230,6 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
 		 * Rebalance.
 		 */
 		unsigned target_left = (nr_left + nr_right) / 2;
-		unsigned shift_ = nr_left - target_left;
-		BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
-		BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
 		shift(left, right, nr_left - target_left);
 		*key_ptr(parent, r->index) = right->keys[0];
 	}
@@ -272,6 +265,84 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 	return exit_child(info, &right);
 }
 
+/*
+ * We dump as many entries from center as possible into left, then the rest
+ * in right, then rebalance2.  This wastes some cpu, but I want something
+ * simple atm.
+ */
+static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+			       struct child *l, struct child *c, struct child *r,
+			       struct node *left, struct node *center, struct node *right,
+			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+{
+	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+	unsigned shift = min(max_entries - nr_left, nr_center);
+
+	BUG_ON(nr_left + shift > max_entries);
+	node_copy(left, center, -shift);
+	left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+	if (shift != nr_center) {
+		shift = nr_center - shift;
+		BUG_ON((nr_right + shift) > max_entries);
+		node_shift(right, shift);
+		node_copy(center, right, shift);
+		right->header.nr_entries = cpu_to_le32(nr_right + shift);
+	}
+	*key_ptr(parent, r->index) = right->keys[0];
+
+	delete_at(parent, c->index);
+	r->index--;
+
+	dm_tm_dec(info->tm, dm_block_location(c->block));
+	__rebalance2(info, parent, l, r);
+}
+
+/*
+ * Redistributes entries among 3 sibling nodes.
+ */
+static void redistribute3(struct dm_btree_info *info, struct node *parent,
+			  struct child *l, struct child *c, struct child *r,
+			  struct node *left, struct node *center, struct node *right,
+			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+{
+	int s;
+	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+	unsigned target = (nr_left + nr_center + nr_right) / 3;
+	BUG_ON(target > max_entries);
+
+	if (nr_left < nr_right) {
+		s = nr_left - target;
+
+		if (s < 0 && nr_center < -s) {
+			/* not enough in central node */
+			shift(left, center, nr_center);
+			s = nr_center - target;
+			shift(left, right, s);
+			nr_right += s;
+		} else
+			shift(left, center, s);
+
+		shift(center, right, target - nr_right);
+
+	} else {
+		s = target - nr_right;
+		if (s > 0 && nr_center < s) {
+			/* not enough in central node */
+			shift(center, right, nr_center);
+			s = target - nr_center;
+			shift(left, right, s);
+			nr_left -= s;
+		} else
+			shift(center, right, s);
+
+		shift(left, center, nr_left - target);
+	}
+
+	*key_ptr(parent, c->index) = center->keys[0];
+	*key_ptr(parent, r->index) = right->keys[0];
+}
+
 static void __rebalance3(struct dm_btree_info *info, struct node *parent,
 			 struct child *l, struct child *c, struct child *r)
 {
@@ -282,62 +353,18 @@ static void __rebalance3(struct dm_btree_info *info, struct node *parent,
 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
 	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
-	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
-	unsigned target;
+	unsigned threshold = merge_threshold(left) * 4 + 1;
 
 	BUG_ON(left->header.max_entries != center->header.max_entries);
 	BUG_ON(center->header.max_entries != right->header.max_entries);
 
-	if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
-		/*
-		 * Delete center node:
-		 *
-		 * We dump as many entries from center as possible into
-		 * left, then the rest in right, then rebalance2.  This
-		 * wastes some cpu, but I want something simple atm.
-		 */
-		unsigned shift = min(max_entries - nr_left, nr_center);
-
-		BUG_ON(nr_left + shift > max_entries);
-		node_copy(left, center, -shift);
-		left->header.nr_entries = cpu_to_le32(nr_left + shift);
-
-		if (shift != nr_center) {
-			shift = nr_center - shift;
-			BUG_ON((nr_right + shift) >= max_entries);
-			node_shift(right, shift);
-			node_copy(center, right, shift);
-			right->header.nr_entries = cpu_to_le32(nr_right + shift);
-		}
-		*key_ptr(parent, r->index) = right->keys[0];
-
-		delete_at(parent, c->index);
-		r->index--;
-
-		dm_tm_dec(info->tm, dm_block_location(c->block));
-		__rebalance2(info, parent, l, r);
-
-		return;
-	}
-
-	/*
-	 * Rebalance
-	 */
-	target = (nr_left + nr_center + nr_right) / 3;
-	BUG_ON(target > max_entries);
-
-	/*
-	 * Adjust the left node
-	 */
-	shift(left, center, nr_left - target);
-
-	/*
-	 * Adjust the right node
-	 */
-	shift(center, right, target - nr_right);
-
-	*key_ptr(parent, c->index) = center->keys[0];
-	*key_ptr(parent, r->index) = right->keys[0];
+	if ((nr_left + nr_center + nr_right) < threshold)
+		delete_center_node(info, parent, l, c, r, left, center, right,
+				   nr_left, nr_center, nr_right);
+	else
+		redistribute3(info, parent, l, c, r, left, center, right,
+			      nr_left, nr_center, nr_right);
 }
 
 static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
@@ -441,9 +468,6 @@ static int rebalance_children(struct shadow_spine *s,
 	if (r)
 		return r;
 
-	if (child_entries > del_threshold(n))
-		return 0;
-
 	has_left_sibling = i > 0;
 	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
 
@@ -496,7 +520,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
 		 */
 		if (shadow_has_parent(s)) {
 			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
-			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(__le64)),
+			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
 			       &location, sizeof(__le64));
 		}
 
@@ -553,7 +577,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
 
 		if (info->value_type.dec)
 			info->value_type.dec(info->value_type.context,
-					     value_ptr(n, index, info->value_type.size));
+					     value_ptr(n, index));
 
 		delete_at(n, index);
 	}
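A quick way to read the new thresholds in this patch: merge_threshold() now returns max_entries / 3; __rebalance2() merges two siblings when they hold fewer than 2 * threshold + 1 entries between them, and __rebalance3() deletes the center node when three siblings hold fewer than 4 * threshold + 1 in total. The standalone C sketch below mirrors that arithmetic; it is illustrative only (the max_entries value of 126 and the sample entry counts are made-up assumptions, and merge_threshold() here takes a plain integer rather than a struct node).

```c
/*
 * Illustration of the rebalancing thresholds introduced above.
 * Not kernel code: capacity and entry counts are arbitrary.
 */
#include <stdio.h>

static unsigned merge_threshold(unsigned max_entries)
{
	return max_entries / 3;	/* matches the patched helper */
}

int main(void)
{
	unsigned max_entries = 126;			/* assumed node capacity */
	unsigned t = merge_threshold(max_entries);	/* 42 */

	/* __rebalance2(): merge two siblings below this combined total */
	unsigned threshold2 = 2 * t + 1;		/* 85 */

	/* __rebalance3(): delete the center node below this combined total */
	unsigned threshold3 = 4 * t + 1;		/* 169 */

	printf("merge_threshold = %u\n", t);
	printf("2-node merge when nr_left + nr_right < %u\n", threshold2);
	printf("3-node center delete when total < %u\n", threshold3);

	/*
	 * Otherwise redistribute3() evens the siblings out, e.g.
	 * 100 + 80 + 60 = 240 entries (>= 169) are rebalanced toward
	 * target = 240 / 3 = 80 entries per node.
	 */
	unsigned nr_left = 100, nr_center = 80, nr_right = 60;
	unsigned target = (nr_left + nr_center + nr_right) / 3;

	printf("redistribute3 target = %u entries per node\n", target);
	return 0;
}
```

Note that the del_threshold() early-exit in rebalance_children() is gone, so the merge-versus-redistribute decision now lives entirely inside __rebalance2() and __rebalance3().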
