// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"
/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may be stale (not uptodate) */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of the tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location yet
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256
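/*
 * Each edge links two adjacent levels of the backref tree: node[LOWER] is
 * the referenced child block and node[UPPER] the referencing parent.
 * list[LOWER] chains the edge into the lower node's "upper" list, and
 * list[UPPER] chains it into the upper node's "lower" list.
 */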
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
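/*
 * Note: tree_insert() returns NULL on success and the colliding rb_node
 * when an entry with the same bytenr is already present; callers in this
 * file treat a non-NULL return as cache corruption:
 *
 *	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
 *	if (rb_node)
 *		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 */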
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}
/*
 * walk up backref nodes until we reach the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
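/*
 * Together walk_up_backref() and walk_down_backref() implement an
 * iterative depth-first traversal of the backref graph: walking up
 * records the edge taken at each level in edges[], and walking down
 * backs out to the deepest level that still has an unvisited sibling
 * edge, returning that sibling's upper node (or NULL once every path
 * above the starting node has been seen).
 */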
static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing of the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
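/*
 * Return value convention above: 0 means the cache was already valid for
 * this transaction (or has just been stamped with its transid); 1 means
 * stale nodes were rewritten to their new bytenr and last_trans was
 * cleared so the next caller re-stamps it.
 */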
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in the previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
	    root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}
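/*
 * Assumed lookup convention: for reference counted (fs/subvolume) trees
 * the key offset is set to (u64)-1 so the search matches the latest
 * ROOT_ITEM for the objectid, while COW-only trees are stored with
 * offset 0 only.
 */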
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
	if (item_size < sizeof(*ei)) {
		btrfs_print_v0_err(leaf->fs_info);
		btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
		return 1;
	}
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
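/*
 * On-disk layout walked above: a btrfs_extent_item, then (for
 * EXTENT_ITEM keys only) a btrfs_tree_block_info, then a packed run of
 * inline refs.  The [*ptr, *end) range brackets exactly that run, so the
 * caller can step through it with btrfs_extent_inline_ref_size().
 */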
/*
 * build backref tree for a given tree block. the root of the backref tree
 * corresponds to the tree block, the leaves of the backref tree correspond
 * to the roots of the b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1; /* For searching extent root */
	struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = READA_FORWARD;
	path2->reada = READA_FORWARD;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			int type;

			iref = (struct btrfs_extent_inline_ref *)ptr;
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				err = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);

			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			if (key.objectid == key.offset) {
				/*
				 * Only root blocks of reloc trees use a backref
				 * pointing to itself.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			err = -EINVAL;
			btrfs_print_v0_err(rc->extent_root->fs_info);
			btrfs_handle_fs_error(rc->extent_root->fs_info, err,
					      NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/* Search the tree to find parent blocks referring to the block. */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
		    cur->bytenr) {
			btrfs_err(root->fs_info,
	"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
				  cur->bytenr, level - 1,
				  root->root_key.objectid,
				  node_key->objectid, node_key->type,
				  node_key->offset);
			err = -ENOENT;
			goto out;
		}
		lower = cur;
		need_check = true;

		/* Add all nodes and edges in the path */
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs, we only do this
				 * once while walking up a tree as we will
				 * catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end)
			ptr += btrfs_extent_inline_ref_size(key.type);
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}

		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);

			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);

			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;

			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}

		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			if (lower == node)
				node = NULL;
			free_backref_node(cache, lower);
		}

		free_backref_node(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree.
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add an 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(fs_info, -EEXIST,
			    "Duplicate root found for start=%llu while inserting into relocation tree",
			    node->bytenr);
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
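/*
 * The reloc_root_tree mapping lets later lookups (see find_reloc_root())
 * go from a root block's bytenr straight to the owning reloc tree; it
 * must be kept in sync when COW moves the reloc root's node, which is
 * what __update_reloc_root() below is for.
 */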
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
				      root->node->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		if (!node)
			return;
		BUG_ON((struct btrfs_root *)node->data != root);
	}

	spin_lock(&fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}
/*
 * create a reloc tree for a given fs tree. the reloc tree is just a
 * snapshot of the fs tree with the special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}
/*
 * update the root item of the reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}
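/*
 * Note: swapping commit_root for the current node above mirrors what a
 * transaction commit does for ordinary roots, so after the update the
 * on-disk root item points at the reloc tree's latest root node.
 */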
/*
 * helper to find the first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}

static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
/*
 * get the new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
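/*
 * The relocation data inode stores the copied extents at file offsets
 * equal to their offset inside the block group being relocated, and
 * index_cnt holds the block group's start; subtracting it above turns a
 * logical disk address into the matching file offset for the lookup.
 */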
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(BTRFS_I(inode),
						key.offset, end, 1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}
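/*
 * memcmp_node_keys() returns 0 iff the key at 'slot' in the fs tree node
 * matches the key at the current slot of the reloc tree path at the same
 * level; a mismatch tells replace_path() below that the trees diverged
 * here and the block cannot simply be swapped.
 */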
/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		struct btrfs_key first_key;

		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(parent, &first_key, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
					     level - 1, &first_key);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			} else if (!extent_buffer_uptodate(eb)) {
				ret = -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *    and tree block numbers, if current trans doesn't free
		 *    data reloc tree inode.
		 */
		ret = btrfs_qgroup_trace_subtree_swap(trans, rc->block_group,
				parent, slot, path->nodes[level],
				path->slots[level], last_snapshot);
		if (ret < 0)
			break;

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr,
					   blocksize, path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr,
					   blocksize, 0, dest->root_key.objectid,
					   level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
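/*
 * Note the two-pass structure above: the first descent runs with cow == 0
 * and only reads, so nothing is dirtied while searching for a swappable
 * block; once a candidate is found the function restarts ('again') with
 * cow == 1 and COWs the blocks on the way down before doing the swap.
 */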
/*
 * helper to find the next relocated block in the reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}

/*
 * walk down the reloc tree to find the relocated block at the lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		struct btrfs_key first_key;

		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
		eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
				     &first_key);
		if (IS_ERR(eb)) {
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
 * invalidate the extent cache for file extents whose keys are in the
 * range [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
/*
 * merge the relocated tree blocks in the reloc tree with the
 * corresponding fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and the block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(fs_info, reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}

static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
		free_extent_buffer(reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->node = NULL;
		reloc_root->commit_root = NULL;
	}
}
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
		}

		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
		if (ret < 0) {
			if (list_empty(&reloc_root->root_list))
				list_add_tail(&reloc_root->root_list,
					      &reloc_roots);
			goto out;
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
}
static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;
	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}

static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);

	return btrfs_record_root_in_trans(trans, root);
}
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}

/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is the root of a reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
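/*
 * The value computed above is nodesize for every not-yet-processed node
 * on each path from 'node' up to the tree roots, i.e. the worst-case
 * amount of metadata that relocating this block may dirty; the caller
 * below doubles it to cover the COW copies.
 */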
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need to hold a lock to protect block_rsv.
		 * we expand more reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * the enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}
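/*
 * -EAGAIN is the expected "out of space inside a transaction" result
 * here: the caller is expected to end its transaction, let the enlarged
 * block_rsv be refilled with full flushing, and then retry the node.
 */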
/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_key first_key;

		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;
				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
		eb = read_tree_block(fs_info, bytenr, generation,
				     upper->level - 1, &first_key);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			ret = btrfs_inc_extent_ref(trans, root,
						node->eb->start, blocksize,
						upper->eb->start,
						btrfs_header_owner(upper->eb),
						node->level, 0);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}

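/*
 * relink @node, which was cowed earlier in the transaction, into its
 * upper level blocks: take the block's first key and let
 * do_relocation() update the stale parent pointers.
 */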
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}

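/*
 * drain the per-level pending lists: every block on them has been
 * cowed but still has parents pointing at its old location.  link each
 * one to its uppers; the first error is kept and returned via @err.
 */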
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}

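/*
 * record a processed block by setting EXTENT_DIRTY on its byte range
 * in rc->processed_blocks.
 */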
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY);
}

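/*
 * mark a backref node processed.  only leaves and blocks inside the
 * block group being relocated have their byte range recorded; other
 * blocks just get the processed bit.
 */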
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}

/*
 * mark a block, and all blocks that directly or indirectly reference
 * it, as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}

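/*
 * test the EXTENT_DIRTY bits in rc->processed_blocks to tell whether
 * a tree block has already been relocated.
 */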
static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}

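/*
 * read a tree block and save its first key.  until the key is read,
 * block->key.offset carries the block's generation, which is what
 * read_tree_block() expects as its third argument here.
 */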
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
			     block->level, NULL);
	if (IS_ERR(eb)) {
		return PTR_ERR(eb);
	} else if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}

/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct backref_node *node,
				struct btrfs_key *key,
				struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}

/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}

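/*
 * preallocate file space for a cluster of data extents before the data
 * is copied in.  each cluster->boundary[] entry starts a new
 * preallocated range, so the original extent boundaries survive
 * relocation.  for illustration (values not from this file): with
 * boundary[] = {B0, B1} and end E, the ranges [B0, B1 - 1] and [B1, E]
 * are preallocated separately, all shifted down by
 * BTRFS_I(inode)->index_cnt into the data inode's address space.
 */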
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset;
	struct extent_changeset *data_reserved = NULL;

	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);

	ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
					  prealloc_end + 1 - prealloc_start);
	if (ret)
		goto out;

	cur_offset = prealloc_start;
	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		if (cur_offset < start)
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, start - cur_offset);
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, prealloc_end + 1 - cur_offset);
out:
	inode_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}

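/*
 * map the file range [start, end] of the data inode to the extent's
 * current logical address @block_start, so that the pages to relocate
 * can be read in through the page cache.  the mapping is pinned to
 * keep it from being merged or dropped behind our back; any
 * conflicting cached mapping is dropped until the insert succeeds.
 */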
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}

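/*
 * copy a cluster of data extents through the data inode's page cache:
 * preallocate the destination, map the old location, then read each
 * page, mark it delalloc and tag the cluster boundaries with
 * EXTENT_BOUNDARY so writeback keeps the relocated extents separate.
 */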
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							       PAGE_SIZE, true);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
						NULL, 0);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),