1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "free-space-tree.h"
37 #include "math.h"
38 #include "sysfs.h"
39 #include "qgroup.h"
40
41 #undef SCRAMBLE_DELAYED_REFS
42
43 /*
44  * Control flags for do_chunk_alloc's force field.
45  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
46  * if we really need one.
47  *
48  * CHUNK_ALLOC_LIMITED means to only try to allocate one
49  * if we have very few chunks already allocated.  This is
50  * used as part of the clustering code to help make sure
51  * we have a good pool of storage to cluster in, without
52  * filling the FS with empty chunks.
53  *
54  * CHUNK_ALLOC_FORCE means it must try to allocate one.
55  *
56  */
57 enum {
58         CHUNK_ALLOC_NO_FORCE = 0,
59         CHUNK_ALLOC_LIMITED = 1,
60         CHUNK_ALLOC_FORCE = 2,
61 };
62
63 static int update_block_group(struct btrfs_trans_handle *trans,
64                               struct btrfs_root *root, u64 bytenr,
65                               u64 num_bytes, int alloc);
66 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
67                                 struct btrfs_root *root,
68                                 struct btrfs_delayed_ref_node *node, u64 parent,
69                                 u64 root_objectid, u64 owner_objectid,
70                                 u64 owner_offset, int refs_to_drop,
71                                 struct btrfs_delayed_extent_op *extra_op);
72 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
73                                     struct extent_buffer *leaf,
74                                     struct btrfs_extent_item *ei);
75 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
76                                       struct btrfs_root *root,
77                                       u64 parent, u64 root_objectid,
78                                       u64 flags, u64 owner, u64 offset,
79                                       struct btrfs_key *ins, int ref_mod);
80 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
81                                      struct btrfs_root *root,
82                                      u64 parent, u64 root_objectid,
83                                      u64 flags, struct btrfs_disk_key *key,
84                                      int level, struct btrfs_key *ins);
85 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
86                           struct btrfs_root *extent_root, u64 flags,
87                           int force);
88 static int find_next_key(struct btrfs_path *path, int level,
89                          struct btrfs_key *key);
90 static void dump_space_info(struct btrfs_fs_info *fs_info,
91                             struct btrfs_space_info *info, u64 bytes,
92                             int dump_block_groups);
93 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
94                                     u64 ram_bytes, u64 num_bytes, int delalloc);
95 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
96                                      u64 num_bytes, int delalloc);
97 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
98                                u64 num_bytes);
99 int btrfs_pin_extent(struct btrfs_root *root,
100                      u64 bytenr, u64 num_bytes, int reserved);
101 static int __reserve_metadata_bytes(struct btrfs_root *root,
102                                     struct btrfs_space_info *space_info,
103                                     u64 orig_bytes,
104                                     enum btrfs_reserve_flush_enum flush);
105 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
106                                      struct btrfs_space_info *space_info,
107                                      u64 num_bytes);
108 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
109                                      struct btrfs_space_info *space_info,
110                                      u64 num_bytes);
111
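/*
 * Return non-zero once free space caching for this block group has reached
 * a terminal state, i.e. the cache was either fully loaded
 * (BTRFS_CACHE_FINISHED) or the caching attempt failed (BTRFS_CACHE_ERROR).
 */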
112 static noinline int
113 block_group_cache_done(struct btrfs_block_group_cache *cache)
114 {
115         smp_mb();
116         return cache->cached == BTRFS_CACHE_FINISHED ||
117                 cache->cached == BTRFS_CACHE_ERROR;
118 }
119
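/* Return non-zero if all of the requested flag bits are set on this block group. */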
120 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
121 {
122         return (cache->flags & bits) == bits;
123 }
124
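/*
 * Take a reference on a block group cache entry.  The reference is dropped
 * with btrfs_put_block_group(), which frees the structure once the last
 * reference goes away.
 */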
125 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
126 {
127         atomic_inc(&cache->count);
128 }
129
130 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
131 {
132         if (atomic_dec_and_test(&cache->count)) {
133                 WARN_ON(cache->pinned > 0);
134                 WARN_ON(cache->reserved > 0);
135                 kfree(cache->free_space_ctl);
136                 kfree(cache);
137         }
138 }
139
140 /*
141  * this adds the block group to the fs_info rb tree for the block group
142  * cache
143  */
144 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
145                                 struct btrfs_block_group_cache *block_group)
146 {
147         struct rb_node **p;
148         struct rb_node *parent = NULL;
149         struct btrfs_block_group_cache *cache;
150
151         spin_lock(&info->block_group_cache_lock);
152         p = &info->block_group_cache_tree.rb_node;
153
154         while (*p) {
155                 parent = *p;
156                 cache = rb_entry(parent, struct btrfs_block_group_cache,
157                                  cache_node);
158                 if (block_group->key.objectid < cache->key.objectid) {
159                         p = &(*p)->rb_left;
160                 } else if (block_group->key.objectid > cache->key.objectid) {
161                         p = &(*p)->rb_right;
162                 } else {
163                         spin_unlock(&info->block_group_cache_lock);
164                         return -EEXIST;
165                 }
166         }
167
168         rb_link_node(&block_group->cache_node, parent, p);
169         rb_insert_color(&block_group->cache_node,
170                         &info->block_group_cache_tree);
171
172         if (info->first_logical_byte > block_group->key.objectid)
173                 info->first_logical_byte = block_group->key.objectid;
174
175         spin_unlock(&info->block_group_cache_lock);
176
177         return 0;
178 }
179
180 /*
181  * This will return the block group at or after bytenr if contains is 0, else
182  * it will return the block group that contains the bytenr
183  */
184 static struct btrfs_block_group_cache *
185 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
186                               int contains)
187 {
188         struct btrfs_block_group_cache *cache, *ret = NULL;
189         struct rb_node *n;
190         u64 end, start;
191
192         spin_lock(&info->block_group_cache_lock);
193         n = info->block_group_cache_tree.rb_node;
194
195         while (n) {
196                 cache = rb_entry(n, struct btrfs_block_group_cache,
197                                  cache_node);
198                 end = cache->key.objectid + cache->key.offset - 1;
199                 start = cache->key.objectid;
200
201                 if (bytenr < start) {
202                         if (!contains && (!ret || start < ret->key.objectid))
203                                 ret = cache;
204                         n = n->rb_left;
205                 } else if (bytenr > start) {
206                         if (contains && bytenr <= end) {
207                                 ret = cache;
208                                 break;
209                         }
210                         n = n->rb_right;
211                 } else {
212                         ret = cache;
213                         break;
214                 }
215         }
216         if (ret) {
217                 btrfs_get_block_group(ret);
218                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
219                         info->first_logical_byte = ret->key.objectid;
220         }
221         spin_unlock(&info->block_group_cache_lock);
222
223         return ret;
224 }
225
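/*
 * Mark the range [start, start + num_bytes) as excluded in both
 * freed_extents trees so the caching code will not add it to the free
 * space cache.  free_excluded_extents() clears the exclusions again once
 * caching of the block group has finished.
 */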
226 static int add_excluded_extent(struct btrfs_root *root,
227                                u64 start, u64 num_bytes)
228 {
229         u64 end = start + num_bytes - 1;
230         set_extent_bits(&root->fs_info->freed_extents[0],
231                         start, end, EXTENT_UPTODATE);
232         set_extent_bits(&root->fs_info->freed_extents[1],
233                         start, end, EXTENT_UPTODATE);
234         return 0;
235 }
236
237 static void free_excluded_extents(struct btrfs_root *root,
238                                   struct btrfs_block_group_cache *cache)
239 {
240         u64 start, end;
241
242         start = cache->key.objectid;
243         end = start + cache->key.offset - 1;
244
245         clear_extent_bits(&root->fs_info->freed_extents[0],
246                           start, end, EXTENT_UPTODATE);
247         clear_extent_bits(&root->fs_info->freed_extents[1],
248                           start, end, EXTENT_UPTODATE);
249 }
250
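/*
 * Exclude the portions of this block group that hold superblock copies.
 * Any superblock mirror that maps into the block group is accounted in
 * bytes_super and marked excluded so it is never handed out as free space.
 */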
251 static int exclude_super_stripes(struct btrfs_root *root,
252                                  struct btrfs_block_group_cache *cache)
253 {
254         u64 bytenr;
255         u64 *logical;
256         int stripe_len;
257         int i, nr, ret;
258
259         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
260                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
261                 cache->bytes_super += stripe_len;
262                 ret = add_excluded_extent(root, cache->key.objectid,
263                                           stripe_len);
264                 if (ret)
265                         return ret;
266         }
267
268         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
269                 bytenr = btrfs_sb_offset(i);
270                 ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
271                                        bytenr, 0, &logical, &nr, &stripe_len);
272                 if (ret)
273                         return ret;
274
275                 while (nr--) {
276                         u64 start, len;
277
278                         if (logical[nr] > cache->key.objectid +
279                             cache->key.offset)
280                                 continue;
281
282                         if (logical[nr] + stripe_len <= cache->key.objectid)
283                                 continue;
284
285                         start = logical[nr];
286                         if (start < cache->key.objectid) {
287                                 start = cache->key.objectid;
288                                 len = (logical[nr] + stripe_len) - start;
289                         } else {
290                                 len = min_t(u64, stripe_len,
291                                             cache->key.objectid +
292                                             cache->key.offset - start);
293                         }
294
295                         cache->bytes_super += len;
296                         ret = add_excluded_extent(root, start, len);
297                         if (ret) {
298                                 kfree(logical);
299                                 return ret;
300                         }
301                 }
302
303                 kfree(logical);
304         }
305         return 0;
306 }
307
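/*
 * Return the block group's caching control with an extra reference held,
 * or NULL if no caching is in progress.  The caller drops the reference
 * with put_caching_control().
 */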
308 static struct btrfs_caching_control *
309 get_caching_control(struct btrfs_block_group_cache *cache)
310 {
311         struct btrfs_caching_control *ctl;
312
313         spin_lock(&cache->lock);
314         if (!cache->caching_ctl) {
315                 spin_unlock(&cache->lock);
316                 return NULL;
317         }
318
319         ctl = cache->caching_ctl;
320         atomic_inc(&ctl->count);
321         spin_unlock(&cache->lock);
322         return ctl;
323 }
324
325 static void put_caching_control(struct btrfs_caching_control *ctl)
326 {
327         if (atomic_dec_and_test(&ctl->count))
328                 kfree(ctl);
329 }
330
331 #ifdef CONFIG_BTRFS_DEBUG
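/*
 * Debug helper: remove every other chunk-sized range (nodesize for metadata
 * block groups, sectorsize for data) from the block group's free space so
 * that allocations are forced to fragment.
 */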
332 static void fragment_free_space(struct btrfs_root *root,
333                                 struct btrfs_block_group_cache *block_group)
334 {
335         u64 start = block_group->key.objectid;
336         u64 len = block_group->key.offset;
337         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
338                 root->nodesize : root->sectorsize;
339         u64 step = chunk << 1;
340
341         while (len > chunk) {
342                 btrfs_remove_free_space(block_group, start, chunk);
343                 start += step;
344                 if (len < step)
345                         len = 0;
346                 else
347                         len -= step;
348         }
349 }
350 #endif
351
352 /*
353  * This is only called by cache_block_group.  Since we could have freed extents,
354  * we need to check the pinned_extents for any extents that can't be used yet,
355  * because their free space will be released as soon as the transaction commits.
356  */
357 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
358                        struct btrfs_fs_info *info, u64 start, u64 end)
359 {
360         u64 extent_start, extent_end, size, total_added = 0;
361         int ret;
362
363         while (start < end) {
364                 ret = find_first_extent_bit(info->pinned_extents, start,
365                                             &extent_start, &extent_end,
366                                             EXTENT_DIRTY | EXTENT_UPTODATE,
367                                             NULL);
368                 if (ret)
369                         break;
370
371                 if (extent_start <= start) {
372                         start = extent_end + 1;
373                 } else if (extent_start > start && extent_start < end) {
374                         size = extent_start - start;
375                         total_added += size;
376                         ret = btrfs_add_free_space(block_group, start,
377                                                    size);
378                         BUG_ON(ret); /* -ENOMEM or logic error */
379                         start = extent_end + 1;
380                 } else {
381                         break;
382                 }
383         }
384
385         if (start < end) {
386                 size = end - start;
387                 total_added += size;
388                 ret = btrfs_add_free_space(block_group, start, size);
389                 BUG_ON(ret); /* -ENOMEM or logic error */
390         }
391
392         return total_added;
393 }
394
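/*
 * Walk the extent tree (via the commit root, so no tree locks are taken)
 * for this block group and add the gaps between allocated extents to the
 * free space cache.  Runs from the caching worker and periodically drops
 * the commit_root_sem so transaction commits can make progress.
 */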
395 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
396 {
397         struct btrfs_block_group_cache *block_group;
398         struct btrfs_fs_info *fs_info;
399         struct btrfs_root *extent_root;
400         struct btrfs_path *path;
401         struct extent_buffer *leaf;
402         struct btrfs_key key;
403         u64 total_found = 0;
404         u64 last = 0;
405         u32 nritems;
406         int ret;
407         bool wakeup = true;
408
409         block_group = caching_ctl->block_group;
410         fs_info = block_group->fs_info;
411         extent_root = fs_info->extent_root;
412
413         path = btrfs_alloc_path();
414         if (!path)
415                 return -ENOMEM;
416
417         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
418
419 #ifdef CONFIG_BTRFS_DEBUG
420         /*
421          * If we're fragmenting we don't want to make anybody think we can
422          * allocate from this block group until we've had a chance to fragment
423          * the free space.
424          */
425         if (btrfs_should_fragment_free_space(extent_root, block_group))
426                 wakeup = false;
427 #endif
428         /*
429          * We don't want to deadlock with somebody trying to allocate a new
430          * extent for the extent root while also trying to search the extent
431          * root to add free space.  So we skip locking and search the commit
432          * root, since it's read-only.
433          */
434         path->skip_locking = 1;
435         path->search_commit_root = 1;
436         path->reada = READA_FORWARD;
437
438         key.objectid = last;
439         key.offset = 0;
440         key.type = BTRFS_EXTENT_ITEM_KEY;
441
442 next:
443         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
444         if (ret < 0)
445                 goto out;
446
447         leaf = path->nodes[0];
448         nritems = btrfs_header_nritems(leaf);
449
450         while (1) {
451                 if (btrfs_fs_closing(fs_info) > 1) {
452                         last = (u64)-1;
453                         break;
454                 }
455
456                 if (path->slots[0] < nritems) {
457                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
458                 } else {
459                         ret = find_next_key(path, 0, &key);
460                         if (ret)
461                                 break;
462
463                         if (need_resched() ||
464                             rwsem_is_contended(&fs_info->commit_root_sem)) {
465                                 if (wakeup)
466                                         caching_ctl->progress = last;
467                                 btrfs_release_path(path);
468                                 up_read(&fs_info->commit_root_sem);
469                                 mutex_unlock(&caching_ctl->mutex);
470                                 cond_resched();
471                                 mutex_lock(&caching_ctl->mutex);
472                                 down_read(&fs_info->commit_root_sem);
473                                 goto next;
474                         }
475
476                         ret = btrfs_next_leaf(extent_root, path);
477                         if (ret < 0)
478                                 goto out;
479                         if (ret)
480                                 break;
481                         leaf = path->nodes[0];
482                         nritems = btrfs_header_nritems(leaf);
483                         continue;
484                 }
485
486                 if (key.objectid < last) {
487                         key.objectid = last;
488                         key.offset = 0;
489                         key.type = BTRFS_EXTENT_ITEM_KEY;
490
491                         if (wakeup)
492                                 caching_ctl->progress = last;
493                         btrfs_release_path(path);
494                         goto next;
495                 }
496
497                 if (key.objectid < block_group->key.objectid) {
498                         path->slots[0]++;
499                         continue;
500                 }
501
502                 if (key.objectid >= block_group->key.objectid +
503                     block_group->key.offset)
504                         break;
505
506                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
507                     key.type == BTRFS_METADATA_ITEM_KEY) {
508                         total_found += add_new_free_space(block_group,
509                                                           fs_info, last,
510                                                           key.objectid);
511                         if (key.type == BTRFS_METADATA_ITEM_KEY)
512                                 last = key.objectid +
513                                         fs_info->tree_root->nodesize;
514                         else
515                                 last = key.objectid + key.offset;
516
517                         if (total_found > CACHING_CTL_WAKE_UP) {
518                                 total_found = 0;
519                                 if (wakeup)
520                                         wake_up(&caching_ctl->wait);
521                         }
522                 }
523                 path->slots[0]++;
524         }
525         ret = 0;
526
527         total_found += add_new_free_space(block_group, fs_info, last,
528                                           block_group->key.objectid +
529                                           block_group->key.offset);
530         caching_ctl->progress = (u64)-1;
531
532 out:
533         btrfs_free_path(path);
534         return ret;
535 }
536
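/*
 * Worker that populates a block group's free space cache, either from the
 * free space tree or by scanning the extent tree, then marks the block
 * group as cached (or errored) and wakes up anybody waiting on the caching
 * control.
 */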
537 static noinline void caching_thread(struct btrfs_work *work)
538 {
539         struct btrfs_block_group_cache *block_group;
540         struct btrfs_fs_info *fs_info;
541         struct btrfs_caching_control *caching_ctl;
542         struct btrfs_root *extent_root;
543         int ret;
544
545         caching_ctl = container_of(work, struct btrfs_caching_control, work);
546         block_group = caching_ctl->block_group;
547         fs_info = block_group->fs_info;
548         extent_root = fs_info->extent_root;
549
550         mutex_lock(&caching_ctl->mutex);
551         down_read(&fs_info->commit_root_sem);
552
553         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
554                 ret = load_free_space_tree(caching_ctl);
555         else
556                 ret = load_extent_tree_free(caching_ctl);
557
558         spin_lock(&block_group->lock);
559         block_group->caching_ctl = NULL;
560         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
561         spin_unlock(&block_group->lock);
562
563 #ifdef CONFIG_BTRFS_DEBUG
564         if (btrfs_should_fragment_free_space(extent_root, block_group)) {
565                 u64 bytes_used;
566
567                 spin_lock(&block_group->space_info->lock);
568                 spin_lock(&block_group->lock);
569                 bytes_used = block_group->key.offset -
570                         btrfs_block_group_used(&block_group->item);
571                 block_group->space_info->bytes_used += bytes_used >> 1;
572                 spin_unlock(&block_group->lock);
573                 spin_unlock(&block_group->space_info->lock);
574                 fragment_free_space(extent_root, block_group);
575         }
576 #endif
577
578         caching_ctl->progress = (u64)-1;
579
580         up_read(&fs_info->commit_root_sem);
581         free_excluded_extents(fs_info->extent_root, block_group);
582         mutex_unlock(&caching_ctl->mutex);
583
584         wake_up(&caching_ctl->wait);
585
586         put_caching_control(caching_ctl);
587         btrfs_put_block_group(block_group);
588 }
589
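/*
 * Kick off free space caching for a block group.  With the space_cache
 * mount option we first try to load the on-disk free space cache; if that
 * succeeds the block group is marked cached immediately.  Otherwise, and
 * when load_cache_only is not set, the caching worker is queued to build
 * the cache from the extent tree or free space tree.
 */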
590 static int cache_block_group(struct btrfs_block_group_cache *cache,
591                              int load_cache_only)
592 {
593         DEFINE_WAIT(wait);
594         struct btrfs_fs_info *fs_info = cache->fs_info;
595         struct btrfs_caching_control *caching_ctl;
596         int ret = 0;
597
598         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
599         if (!caching_ctl)
600                 return -ENOMEM;
601
602         INIT_LIST_HEAD(&caching_ctl->list);
603         mutex_init(&caching_ctl->mutex);
604         init_waitqueue_head(&caching_ctl->wait);
605         caching_ctl->block_group = cache;
606         caching_ctl->progress = cache->key.objectid;
607         atomic_set(&caching_ctl->count, 1);
608         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
609                         caching_thread, NULL, NULL);
610
611         spin_lock(&cache->lock);
612         /*
613          * This should be a rare occasion, but this could happen I think in the
614          * case where one thread starts to load the space cache info, and then
615          * some other thread starts a transaction commit which tries to do an
616          * allocation while the other thread is still loading the space cache
617          * info.  The previous loop should have kept us from choosing this block
618          * group, but if we've moved to the state where we will wait on caching
619          * block groups we need to first check if we're doing a fast load here,
620          * so we can wait for it to finish, otherwise we could end up allocating
621          * from a block group whose cache gets evicted for one reason or
622          * another.
623          */
624         while (cache->cached == BTRFS_CACHE_FAST) {
625                 struct btrfs_caching_control *ctl;
626
627                 ctl = cache->caching_ctl;
628                 atomic_inc(&ctl->count);
629                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
630                 spin_unlock(&cache->lock);
631
632                 schedule();
633
634                 finish_wait(&ctl->wait, &wait);
635                 put_caching_control(ctl);
636                 spin_lock(&cache->lock);
637         }
638
639         if (cache->cached != BTRFS_CACHE_NO) {
640                 spin_unlock(&cache->lock);
641                 kfree(caching_ctl);
642                 return 0;
643         }
644         WARN_ON(cache->caching_ctl);
645         cache->caching_ctl = caching_ctl;
646         cache->cached = BTRFS_CACHE_FAST;
647         spin_unlock(&cache->lock);
648
649         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
650                 mutex_lock(&caching_ctl->mutex);
651                 ret = load_free_space_cache(fs_info, cache);
652
653                 spin_lock(&cache->lock);
654                 if (ret == 1) {
655                         cache->caching_ctl = NULL;
656                         cache->cached = BTRFS_CACHE_FINISHED;
657                         cache->last_byte_to_unpin = (u64)-1;
658                         caching_ctl->progress = (u64)-1;
659                 } else {
660                         if (load_cache_only) {
661                                 cache->caching_ctl = NULL;
662                                 cache->cached = BTRFS_CACHE_NO;
663                         } else {
664                                 cache->cached = BTRFS_CACHE_STARTED;
665                                 cache->has_caching_ctl = 1;
666                         }
667                 }
668                 spin_unlock(&cache->lock);
669 #ifdef CONFIG_BTRFS_DEBUG
670                 if (ret == 1 &&
671                     btrfs_should_fragment_free_space(fs_info->extent_root,
672                                                      cache)) {
673                         u64 bytes_used;
674
675                         spin_lock(&cache->space_info->lock);
676                         spin_lock(&cache->lock);
677                         bytes_used = cache->key.offset -
678                                 btrfs_block_group_used(&cache->item);
679                         cache->space_info->bytes_used += bytes_used >> 1;
680                         spin_unlock(&cache->lock);
681                         spin_unlock(&cache->space_info->lock);
682                         fragment_free_space(fs_info->extent_root, cache);
683                 }
684 #endif
685                 mutex_unlock(&caching_ctl->mutex);
686
687                 wake_up(&caching_ctl->wait);
688                 if (ret == 1) {
689                         put_caching_control(caching_ctl);
690                         free_excluded_extents(fs_info->extent_root, cache);
691                         return 0;
692                 }
693         } else {
694                 /*
695                  * We're either using the free space tree or no caching at all.
696                  * Set cached to the appropriate value and wakeup any waiters.
697                  */
698                 spin_lock(&cache->lock);
699                 if (load_cache_only) {
700                         cache->caching_ctl = NULL;
701                         cache->cached = BTRFS_CACHE_NO;
702                 } else {
703                         cache->cached = BTRFS_CACHE_STARTED;
704                         cache->has_caching_ctl = 1;
705                 }
706                 spin_unlock(&cache->lock);
707                 wake_up(&caching_ctl->wait);
708         }
709
710         if (load_cache_only) {
711                 put_caching_control(caching_ctl);
712                 return 0;
713         }
714
715         down_write(&fs_info->commit_root_sem);
716         atomic_inc(&caching_ctl->count);
717         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
718         up_write(&fs_info->commit_root_sem);
719
720         btrfs_get_block_group(cache);
721
722         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
723
724         return ret;
725 }
726
727 /*
728  * return the block group that starts at or after bytenr
729  */
730 static struct btrfs_block_group_cache *
731 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
732 {
733         return block_group_cache_tree_search(info, bytenr, 0);
734 }
735
736 /*
737  * return the block group that contains the given bytenr
738  */
739 struct btrfs_block_group_cache *btrfs_lookup_block_group(
740                                                  struct btrfs_fs_info *info,
741                                                  u64 bytenr)
742 {
743         return block_group_cache_tree_search(info, bytenr, 1);
744 }
745
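/*
 * Find the space_info whose flags match the block group type bits in
 * @flags (data, metadata or system), or NULL if no such space_info has
 * been created yet.
 */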
746 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
747                                                   u64 flags)
748 {
749         struct list_head *head = &info->space_info;
750         struct btrfs_space_info *found;
751
752         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
753
754         rcu_read_lock();
755         list_for_each_entry_rcu(found, head, list) {
756                 if (found->flags & flags) {
757                         rcu_read_unlock();
758                         return found;
759                 }
760         }
761         rcu_read_unlock();
762         return NULL;
763 }
764
765 /*
766  * after adding space to the filesystem, we need to clear the full flags
767  * on all the space infos.
768  */
769 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
770 {
771         struct list_head *head = &info->space_info;
772         struct btrfs_space_info *found;
773
774         rcu_read_lock();
775         list_for_each_entry_rcu(found, head, list)
776                 found->full = 0;
777         rcu_read_unlock();
778 }
779
780 /* simple helper to search for an existing data extent at a given offset */
781 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
782 {
783         int ret;
784         struct btrfs_key key;
785         struct btrfs_path *path;
786
787         path = btrfs_alloc_path();
788         if (!path)
789                 return -ENOMEM;
790
791         key.objectid = start;
792         key.offset = len;
793         key.type = BTRFS_EXTENT_ITEM_KEY;
794         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
795                                 0, 0);
796         btrfs_free_path(path);
797         return ret;
798 }
799
800 /*
801  * Helper function to look up the reference count and flags of a tree block.
802  *
803  * The delayed ref head node is used to store the sum of all the
804  * reference count modifications queued up in the rbtree.  The head
805  * node may also store the extent flags to set.  This way you can check
806  * what the reference count and extent flags will be once all of the
807  * queued delayed refs have been processed.
808  */
809 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
810                              struct btrfs_root *root, u64 bytenr,
811                              u64 offset, int metadata, u64 *refs, u64 *flags)
812 {
813         struct btrfs_delayed_ref_head *head;
814         struct btrfs_delayed_ref_root *delayed_refs;
815         struct btrfs_path *path;
816         struct btrfs_extent_item *ei;
817         struct extent_buffer *leaf;
818         struct btrfs_key key;
819         u32 item_size;
820         u64 num_refs;
821         u64 extent_flags;
822         int ret;
823
824         /*
825          * If we don't have skinny metadata, don't bother doing anything
826          * different
827          */
828         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
829                 offset = root->nodesize;
830                 metadata = 0;
831         }
832
833         path = btrfs_alloc_path();
834         if (!path)
835                 return -ENOMEM;
836
837         if (!trans) {
838                 path->skip_locking = 1;
839                 path->search_commit_root = 1;
840         }
841
842 search_again:
843         key.objectid = bytenr;
844         key.offset = offset;
845         if (metadata)
846                 key.type = BTRFS_METADATA_ITEM_KEY;
847         else
848                 key.type = BTRFS_EXTENT_ITEM_KEY;
849
850         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
851                                 &key, path, 0, 0);
852         if (ret < 0)
853                 goto out_free;
854
855         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
856                 if (path->slots[0]) {
857                         path->slots[0]--;
858                         btrfs_item_key_to_cpu(path->nodes[0], &key,
859                                               path->slots[0]);
860                         if (key.objectid == bytenr &&
861                             key.type == BTRFS_EXTENT_ITEM_KEY &&
862                             key.offset == root->nodesize)
863                                 ret = 0;
864                 }
865         }
866
867         if (ret == 0) {
868                 leaf = path->nodes[0];
869                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
870                 if (item_size >= sizeof(*ei)) {
871                         ei = btrfs_item_ptr(leaf, path->slots[0],
872                                             struct btrfs_extent_item);
873                         num_refs = btrfs_extent_refs(leaf, ei);
874                         extent_flags = btrfs_extent_flags(leaf, ei);
875                 } else {
876 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
877                         struct btrfs_extent_item_v0 *ei0;
878                         BUG_ON(item_size != sizeof(*ei0));
879                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
880                                              struct btrfs_extent_item_v0);
881                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
882                         /* FIXME: this isn't correct for data */
883                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
884 #else
885                         BUG();
886 #endif
887                 }
888                 BUG_ON(num_refs == 0);
889         } else {
890                 num_refs = 0;
891                 extent_flags = 0;
892                 ret = 0;
893         }
894
895         if (!trans)
896                 goto out;
897
898         delayed_refs = &trans->transaction->delayed_refs;
899         spin_lock(&delayed_refs->lock);
900         head = btrfs_find_delayed_ref_head(trans, bytenr);
901         if (head) {
902                 if (!mutex_trylock(&head->mutex)) {
903                         atomic_inc(&head->node.refs);
904                         spin_unlock(&delayed_refs->lock);
905
906                         btrfs_release_path(path);
907
908                         /*
909                          * Mutex was contended, block until it's released and try
910                          * again
911                          */
912                         mutex_lock(&head->mutex);
913                         mutex_unlock(&head->mutex);
914                         btrfs_put_delayed_ref(&head->node);
915                         goto search_again;
916                 }
917                 spin_lock(&head->lock);
918                 if (head->extent_op && head->extent_op->update_flags)
919                         extent_flags |= head->extent_op->flags_to_set;
920                 else
921                         BUG_ON(num_refs == 0);
922
923                 num_refs += head->node.ref_mod;
924                 spin_unlock(&head->lock);
925                 mutex_unlock(&head->mutex);
926         }
927         spin_unlock(&delayed_refs->lock);
928 out:
929         WARN_ON(num_refs == 0);
930         if (refs)
931                 *refs = num_refs;
932         if (flags)
933                 *flags = extent_flags;
934 out_free:
935         btrfs_free_path(path);
936         return ret;
937 }
938
939 /*
940  * Back reference rules.  Back refs have three main goals:
941  *
942  * 1) differentiate between all holders of references to an extent so that
943  *    when a reference is dropped we can make sure it was a valid reference
944  *    before freeing the extent.
945  *
946  * 2) Provide enough information to quickly find the holders of an extent
947  *    if we notice a given block is corrupted or bad.
948  *
949  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
950  *    maintenance.  This is actually the same as #2, but with a slightly
951  *    different use case.
952  *
953  * There are two kinds of back refs. Implicit back refs are optimized
954  * for pointers in non-shared tree blocks. For a given pointer in a block,
955  * back refs of this kind provide information about the block's owner tree
956  * and the pointer's key. This information allows us to find the block by
957  * b-tree searching. Full back refs are for pointers in tree blocks not
958  * referenced by their owner trees. The location of the tree block is
959  * recorded in the back ref. Actually the full back ref is generic and can
960  * be used in all cases where implicit back refs are used. The major
961  * shortcoming of full back refs is their overhead: every time a tree block
962  * gets COWed, we have to update the back ref entries for all pointers in it.
963  *
964  * For a newly allocated tree block, we use implicit back refs for
965  * pointers in it. This means most tree related operations only involve
966  * implicit back refs. For a tree block created in an old transaction, the
967  * only way to drop a reference to it is to COW it. So we can detect the
968  * event that a tree block loses its owner tree's reference and do the
969  * back refs conversion.
970  *
971  * When a tree block is COWed through a tree, there are four cases:
972  *
973  * The reference count of the block is one and the tree is the block's
974  * owner tree. Nothing to do in this case.
975  *
976  * The reference count of the block is one and the tree is not the
977  * block's owner tree. In this case, full back refs is used for pointers
978  * in the block. Remove these full back refs, add implicit back refs for
979  * every pointer in the new block.
980  *
981  * The reference count of the block is greater than one and the tree is
982  * the block's owner tree. In this case, implicit back refs is used for
983  * pointers in the block. Add full back refs for every pointer in the
984  * block, increase lower level extents' reference counts. The original
985  * implicit back refs are entailed to the new block.
986  *
987  * The reference count of the block is greater than one and the tree is
988  * not the block's owner tree. Add implicit back refs for every pointer in
989  * the new block, increase lower level extents' reference count.
990  *
991  * Back Reference Key composing:
992  *
993  * The key objectid corresponds to the first byte in the extent.
994  * The key type is used to differentiate between types of back refs.
995  * There are different meanings of the key offset for different types
996  * of back refs.
997  *
998  * File extents can be referenced by:
999  *
1000  * - multiple snapshots, subvolumes, or different generations in one subvol
1001  * - different files inside a single subvolume
1002  * - different offsets inside a file (bookend extents in file.c)
1003  *
1004  * The extent ref structure for the implicit back refs has fields for:
1005  *
1006  * - Objectid of the subvolume root
1007  * - objectid of the file holding the reference
1008  * - original offset in the file
1009  * - how many bookend extents
1010  *
1011  * The key offset for the implicit back refs is hash of the first
1012  * three fields.
1013  *
1014  * The extent ref structure for the full back refs has a field for:
1015  *
1016  * - number of pointers in the tree leaf
1017  *
1018  * The key offset for the full back refs is the first byte of
1019  * the tree leaf.
1020  *
1021  * When a file extent is allocated, the implicit back ref is used.
1022  * The fields are filled in:
1023  *
1024  *     (root_key.objectid, inode objectid, offset in file, 1)
1025  *
1026  * When a file extent is removed by file truncation, we find the
1027  * corresponding implicit back refs and check the following fields:
1028  *
1029  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1030  *
1031  * Btree extents can be referenced by:
1032  *
1033  * - Different subvolumes
1034  *
1035  * Both the implicit back refs and the full back refs for tree blocks
1036  * only consist of a key. The key offset for the implicit back refs is
1037  * the objectid of the block's owner tree. The key offset for the full back
1038  * refs is the first byte of the parent block.
1039  *
1040  * When implicit back refs are used, information about the lowest key and
1041  * level of the tree block is required. This information is stored in
1042  * the tree block info structure.
1043  */
1044
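/*
 * Illustrative example (the numbers are made up): a data extent at logical
 * address X owned by subvolume 5, inode 257, file offset 0 carries an
 * implicit back ref keyed as
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * while a reference to the same extent through a full (shared) back ref is
 * keyed on the byte number of the parent leaf instead:
 *
 *     (X, BTRFS_SHARED_DATA_REF_KEY, <bytenr of parent leaf>)
 */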
1045 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
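/*
 * Convert an old v0 extent item at the current path position into the
 * current extent item format, growing the item to make room for the flags
 * and, for tree blocks, the btrfs_tree_block_info that follows it.
 */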
1046 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1047                                   struct btrfs_root *root,
1048                                   struct btrfs_path *path,
1049                                   u64 owner, u32 extra_size)
1050 {
1051         struct btrfs_extent_item *item;
1052         struct btrfs_extent_item_v0 *ei0;
1053         struct btrfs_extent_ref_v0 *ref0;
1054         struct btrfs_tree_block_info *bi;
1055         struct extent_buffer *leaf;
1056         struct btrfs_key key;
1057         struct btrfs_key found_key;
1058         u32 new_size = sizeof(*item);
1059         u64 refs;
1060         int ret;
1061
1062         leaf = path->nodes[0];
1063         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1064
1065         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1066         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1067                              struct btrfs_extent_item_v0);
1068         refs = btrfs_extent_refs_v0(leaf, ei0);
1069
1070         if (owner == (u64)-1) {
1071                 while (1) {
1072                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1073                                 ret = btrfs_next_leaf(root, path);
1074                                 if (ret < 0)
1075                                         return ret;
1076                                 BUG_ON(ret > 0); /* Corruption */
1077                                 leaf = path->nodes[0];
1078                         }
1079                         btrfs_item_key_to_cpu(leaf, &found_key,
1080                                               path->slots[0]);
1081                         BUG_ON(key.objectid != found_key.objectid);
1082                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1083                                 path->slots[0]++;
1084                                 continue;
1085                         }
1086                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1087                                               struct btrfs_extent_ref_v0);
1088                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1089                         break;
1090                 }
1091         }
1092         btrfs_release_path(path);
1093
1094         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1095                 new_size += sizeof(*bi);
1096
1097         new_size -= sizeof(*ei0);
1098         ret = btrfs_search_slot(trans, root, &key, path,
1099                                 new_size + extra_size, 1);
1100         if (ret < 0)
1101                 return ret;
1102         BUG_ON(ret); /* Corruption */
1103
1104         btrfs_extend_item(root, path, new_size);
1105
1106         leaf = path->nodes[0];
1107         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1108         btrfs_set_extent_refs(leaf, item, refs);
1109         /* FIXME: get real generation */
1110         btrfs_set_extent_generation(leaf, item, 0);
1111         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1112                 btrfs_set_extent_flags(leaf, item,
1113                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1114                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1115                 bi = (struct btrfs_tree_block_info *)(item + 1);
1116                 /* FIXME: get first key of the block */
1117                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1118                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1119         } else {
1120                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1121         }
1122         btrfs_mark_buffer_dirty(leaf);
1123         return 0;
1124 }
1125 #endif
1126
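/*
 * Hash the (root objectid, inode objectid, file offset) triple into the
 * 64-bit key offset used for EXTENT_DATA_REF items: the root objectid is
 * crc32c'd into the high half, the owner and offset into the low half,
 * and the two halves are combined as (high << 31) ^ low.
 */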
1127 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1128 {
1129         u32 high_crc = ~(u32)0;
1130         u32 low_crc = ~(u32)0;
1131         __le64 lenum;
1132
1133         lenum = cpu_to_le64(root_objectid);
1134         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1135         lenum = cpu_to_le64(owner);
1136         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1137         lenum = cpu_to_le64(offset);
1138         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1139
1140         return ((u64)high_crc << 31) ^ (u64)low_crc;
1141 }
1142
1143 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1144                                      struct btrfs_extent_data_ref *ref)
1145 {
1146         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1147                                     btrfs_extent_data_ref_objectid(leaf, ref),
1148                                     btrfs_extent_data_ref_offset(leaf, ref));
1149 }
1150
1151 static int match_extent_data_ref(struct extent_buffer *leaf,
1152                                  struct btrfs_extent_data_ref *ref,
1153                                  u64 root_objectid, u64 owner, u64 offset)
1154 {
1155         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1156             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1157             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1158                 return 0;
1159         return 1;
1160 }
1161
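/*
 * Find the data back ref item for an extent.  With a non-zero @parent we
 * look for a SHARED_DATA_REF keyed on the parent block; otherwise we search
 * for an EXTENT_DATA_REF at the (root, owner, offset) hash and walk forward
 * over hash collisions until a matching item is found.  Returns 0 when the
 * ref is found, -ENOENT when it is not, or a negative errno on error.
 */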
1162 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1163                                            struct btrfs_root *root,
1164                                            struct btrfs_path *path,
1165                                            u64 bytenr, u64 parent,
1166                                            u64 root_objectid,
1167                                            u64 owner, u64 offset)
1168 {
1169         struct btrfs_key key;
1170         struct btrfs_extent_data_ref *ref;
1171         struct extent_buffer *leaf;
1172         u32 nritems;
1173         int ret;
1174         int recow;
1175         int err = -ENOENT;
1176
1177         key.objectid = bytenr;
1178         if (parent) {
1179                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1180                 key.offset = parent;
1181         } else {
1182                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1183                 key.offset = hash_extent_data_ref(root_objectid,
1184                                                   owner, offset);
1185         }
1186 again:
1187         recow = 0;
1188         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1189         if (ret < 0) {
1190                 err = ret;
1191                 goto fail;
1192         }
1193
1194         if (parent) {
1195                 if (!ret)
1196                         return 0;
1197 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1198                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1199                 btrfs_release_path(path);
1200                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1201                 if (ret < 0) {
1202                         err = ret;
1203                         goto fail;
1204                 }
1205                 if (!ret)
1206                         return 0;
1207 #endif
1208                 goto fail;
1209         }
1210
1211         leaf = path->nodes[0];
1212         nritems = btrfs_header_nritems(leaf);
1213         while (1) {
1214                 if (path->slots[0] >= nritems) {
1215                         ret = btrfs_next_leaf(root, path);
1216                         if (ret < 0)
1217                                 err = ret;
1218                         if (ret)
1219                                 goto fail;
1220
1221                         leaf = path->nodes[0];
1222                         nritems = btrfs_header_nritems(leaf);
1223                         recow = 1;
1224                 }
1225
1226                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1227                 if (key.objectid != bytenr ||
1228                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1229                         goto fail;
1230
1231                 ref = btrfs_item_ptr(leaf, path->slots[0],
1232                                      struct btrfs_extent_data_ref);
1233
1234                 if (match_extent_data_ref(leaf, ref, root_objectid,
1235                                           owner, offset)) {
1236                         if (recow) {
1237                                 btrfs_release_path(path);
1238                                 goto again;
1239                         }
1240                         err = 0;
1241                         break;
1242                 }
1243                 path->slots[0]++;
1244         }
1245 fail:
1246         return err;
1247 }
1248
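/*
 * Insert a new data back ref for an extent, or bump the count of an
 * existing one.  Shared refs are keyed on the parent block; non-shared refs
 * are keyed on the (root, owner, offset) hash, with key.offset incremented
 * past any hash collisions until the matching item or a free slot is found.
 */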
1249 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1250                                            struct btrfs_root *root,
1251                                            struct btrfs_path *path,
1252                                            u64 bytenr, u64 parent,
1253                                            u64 root_objectid, u64 owner,
1254                                            u64 offset, int refs_to_add)
1255 {
1256         struct btrfs_key key;
1257         struct extent_buffer *leaf;
1258         u32 size;
1259         u32 num_refs;
1260         int ret;
1261
1262         key.objectid = bytenr;
1263         if (parent) {
1264                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1265                 key.offset = parent;
1266                 size = sizeof(struct btrfs_shared_data_ref);
1267         } else {
1268                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1269                 key.offset = hash_extent_data_ref(root_objectid,
1270                                                   owner, offset);
1271                 size = sizeof(struct btrfs_extent_data_ref);
1272         }
1273
1274         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1275         if (ret && ret != -EEXIST)
1276                 goto fail;
1277
1278         leaf = path->nodes[0];
1279         if (parent) {
1280                 struct btrfs_shared_data_ref *ref;
1281                 ref = btrfs_item_ptr(leaf, path->slots[0],
1282                                      struct btrfs_shared_data_ref);
1283                 if (ret == 0) {
1284                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1285                 } else {
1286                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1287                         num_refs += refs_to_add;
1288                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1289                 }
1290         } else {
1291                 struct btrfs_extent_data_ref *ref;
1292                 while (ret == -EEXIST) {
1293                         ref = btrfs_item_ptr(leaf, path->slots[0],
1294                                              struct btrfs_extent_data_ref);
1295                         if (match_extent_data_ref(leaf, ref, root_objectid,
1296                                                   owner, offset))
1297                                 break;
1298                         btrfs_release_path(path);
1299                         key.offset++;
1300                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1301                                                       size);
1302                         if (ret && ret != -EEXIST)
1303                                 goto fail;
1304
1305                         leaf = path->nodes[0];
1306                 }
1307                 ref = btrfs_item_ptr(leaf, path->slots[0],
1308                                      struct btrfs_extent_data_ref);
1309                 if (ret == 0) {
1310                         btrfs_set_extent_data_ref_root(leaf, ref,
1311                                                        root_objectid);
1312                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1313                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1314                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1315                 } else {
1316                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1317                         num_refs += refs_to_add;
1318                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1319                 }
1320         }
1321         btrfs_mark_buffer_dirty(leaf);
1322         ret = 0;
1323 fail:
1324         btrfs_release_path(path);
1325         return ret;
1326 }
1327
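/*
 * Drop @refs_to_drop references from the data back ref item the path points
 * at.  The item is deleted and *last_ref set once the count reaches zero;
 * otherwise the stored count is simply decremented.
 */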
1328 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1329                                            struct btrfs_root *root,
1330                                            struct btrfs_path *path,
1331                                            int refs_to_drop, int *last_ref)
1332 {
1333         struct btrfs_key key;
1334         struct btrfs_extent_data_ref *ref1 = NULL;
1335         struct btrfs_shared_data_ref *ref2 = NULL;
1336         struct extent_buffer *leaf;
1337         u32 num_refs = 0;
1338         int ret = 0;
1339
1340         leaf = path->nodes[0];
1341         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1342
1343         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1344                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1345                                       struct btrfs_extent_data_ref);
1346                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1347         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1348                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1349                                       struct btrfs_shared_data_ref);
1350                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1351 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1352         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1353                 struct btrfs_extent_ref_v0 *ref0;
1354                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1355                                       struct btrfs_extent_ref_v0);
1356                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1357 #endif
1358         } else {
1359                 BUG();
1360         }
1361
1362         BUG_ON(num_refs < refs_to_drop);
1363         num_refs -= refs_to_drop;
1364
1365         if (num_refs == 0) {
1366                 ret = btrfs_del_item(trans, root, path);
1367                 *last_ref = 1;
1368         } else {
1369                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1370                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1371                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1372                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1373 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1374                 else {
1375                         struct btrfs_extent_ref_v0 *ref0;
1376                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1377                                         struct btrfs_extent_ref_v0);
1378                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1379                 }
1380 #endif
1381                 btrfs_mark_buffer_dirty(leaf);
1382         }
1383         return ret;
1384 }
1385
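/*
 * Return the reference count of the data ref that the path currently points
 * at, handling both the inline form (when iref is non-NULL) and the keyed
 * EXTENT_DATA_REF / SHARED_DATA_REF item forms.
 */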
1386 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1387                                           struct btrfs_extent_inline_ref *iref)
1388 {
1389         struct btrfs_key key;
1390         struct extent_buffer *leaf;
1391         struct btrfs_extent_data_ref *ref1;
1392         struct btrfs_shared_data_ref *ref2;
1393         u32 num_refs = 0;
1394
1395         leaf = path->nodes[0];
1396         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1397         if (iref) {
1398                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1399                     BTRFS_EXTENT_DATA_REF_KEY) {
1400                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1401                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1402                 } else {
1403                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1404                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1405                 }
1406         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1407                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1408                                       struct btrfs_extent_data_ref);
1409                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1410         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1411                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1412                                       struct btrfs_shared_data_ref);
1413                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1414 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1415         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1416                 struct btrfs_extent_ref_v0 *ref0;
1417                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1418                                       struct btrfs_extent_ref_v0);
1419                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1420 #endif
1421         } else {
1422                 WARN_ON(1);
1423         }
1424         return num_refs;
1425 }
1426
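/*
 * Keyed back refs for tree blocks come in two forms, mirroring
 * extent_ref_type() below: shared refs keyed as
 * (bytenr, SHARED_BLOCK_REF_KEY, parent) and normal refs keyed as
 * (bytenr, TREE_BLOCK_REF_KEY, root_objectid).  Which one is looked up or
 * inserted depends only on whether a parent bytenr was given.
 */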
1427 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1428                                           struct btrfs_root *root,
1429                                           struct btrfs_path *path,
1430                                           u64 bytenr, u64 parent,
1431                                           u64 root_objectid)
1432 {
1433         struct btrfs_key key;
1434         int ret;
1435
1436         key.objectid = bytenr;
1437         if (parent) {
1438                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1439                 key.offset = parent;
1440         } else {
1441                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1442                 key.offset = root_objectid;
1443         }
1444
1445         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1446         if (ret > 0)
1447                 ret = -ENOENT;
1448 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1449         if (ret == -ENOENT && parent) {
1450                 btrfs_release_path(path);
1451                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1452                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1453                 if (ret > 0)
1454                         ret = -ENOENT;
1455         }
1456 #endif
1457         return ret;
1458 }
1459
1460 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1461                                           struct btrfs_root *root,
1462                                           struct btrfs_path *path,
1463                                           u64 bytenr, u64 parent,
1464                                           u64 root_objectid)
1465 {
1466         struct btrfs_key key;
1467         int ret;
1468
1469         key.objectid = bytenr;
1470         if (parent) {
1471                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1472                 key.offset = parent;
1473         } else {
1474                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1475                 key.offset = root_objectid;
1476         }
1477
1478         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1479         btrfs_release_path(path);
1480         return ret;
1481 }
1482
1483 static inline int extent_ref_type(u64 parent, u64 owner)
1484 {
1485         int type;
1486         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1487                 if (parent > 0)
1488                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1489                 else
1490                         type = BTRFS_TREE_BLOCK_REF_KEY;
1491         } else {
1492                 if (parent > 0)
1493                         type = BTRFS_SHARED_DATA_REF_KEY;
1494                 else
1495                         type = BTRFS_EXTENT_DATA_REF_KEY;
1496         }
1497         return type;
1498 }
1499
1500 static int find_next_key(struct btrfs_path *path, int level,
1501                          struct btrfs_key *key)
1502
1503 {
1504         for (; level < BTRFS_MAX_LEVEL; level++) {
1505                 if (!path->nodes[level])
1506                         break;
1507                 if (path->slots[level] + 1 >=
1508                     btrfs_header_nritems(path->nodes[level]))
1509                         continue;
1510                 if (level == 0)
1511                         btrfs_item_key_to_cpu(path->nodes[level], key,
1512                                               path->slots[level] + 1);
1513                 else
1514                         btrfs_node_key_to_cpu(path->nodes[level], key,
1515                                               path->slots[level] + 1);
1516                 return 0;
1517         }
1518         return 1;
1519 }
1520
1521 /*
1522  * Look for an inline back ref.  If the back ref is found, *ref_ret is set
1523  * to the address of the inline back ref, and 0 is returned.
1524  *
1525  * If the back ref isn't found, *ref_ret is set to the address where it
1526  * should be inserted, and -ENOENT is returned.
1527  *
1528  * If insert is true and there are too many inline back refs, the path
1529  * points to the extent item, and -EAGAIN is returned.
1530  *
1531  * NOTE: inline back refs are ordered in the same way that back ref
1532  *       items in the tree are ordered.
1533  */
1534 static noinline_for_stack
1535 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1536                                  struct btrfs_root *root,
1537                                  struct btrfs_path *path,
1538                                  struct btrfs_extent_inline_ref **ref_ret,
1539                                  u64 bytenr, u64 num_bytes,
1540                                  u64 parent, u64 root_objectid,
1541                                  u64 owner, u64 offset, int insert)
1542 {
1543         struct btrfs_key key;
1544         struct extent_buffer *leaf;
1545         struct btrfs_extent_item *ei;
1546         struct btrfs_extent_inline_ref *iref;
1547         u64 flags;
1548         u64 item_size;
1549         unsigned long ptr;
1550         unsigned long end;
1551         int extra_size;
1552         int type;
1553         int want;
1554         int ret;
1555         int err = 0;
1556         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1557                                                  SKINNY_METADATA);
1558
1559         key.objectid = bytenr;
1560         key.type = BTRFS_EXTENT_ITEM_KEY;
1561         key.offset = num_bytes;
1562
1563         want = extent_ref_type(parent, owner);
1564         if (insert) {
1565                 extra_size = btrfs_extent_inline_ref_size(want);
1566                 path->keep_locks = 1;
1567         } else
1568                 extra_size = -1;
1569
1570         /*
1571          * Owner is our parent level, so we can just add one to get the level
1572          * for the block we are interested in.
1573          */
1574         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1575                 key.type = BTRFS_METADATA_ITEM_KEY;
1576                 key.offset = owner;
1577         }
1578
1579 again:
1580         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1581         if (ret < 0) {
1582                 err = ret;
1583                 goto out;
1584         }
1585
1586         /*
1587          * We may be a newly converted file system which still has the old fat
1588          * extent entries for metadata, so try and see if we have one of those.
1589          */
1590         if (ret > 0 && skinny_metadata) {
1591                 skinny_metadata = false;
1592                 if (path->slots[0]) {
1593                         path->slots[0]--;
1594                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1595                                               path->slots[0]);
1596                         if (key.objectid == bytenr &&
1597                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1598                             key.offset == num_bytes)
1599                                 ret = 0;
1600                 }
1601                 if (ret) {
1602                         key.objectid = bytenr;
1603                         key.type = BTRFS_EXTENT_ITEM_KEY;
1604                         key.offset = num_bytes;
1605                         btrfs_release_path(path);
1606                         goto again;
1607                 }
1608         }
1609
1610         if (ret && !insert) {
1611                 err = -ENOENT;
1612                 goto out;
1613         } else if (WARN_ON(ret)) {
1614                 err = -EIO;
1615                 goto out;
1616         }
1617
1618         leaf = path->nodes[0];
1619         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1620 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1621         if (item_size < sizeof(*ei)) {
1622                 if (!insert) {
1623                         err = -ENOENT;
1624                         goto out;
1625                 }
1626                 ret = convert_extent_item_v0(trans, root, path, owner,
1627                                              extra_size);
1628                 if (ret < 0) {
1629                         err = ret;
1630                         goto out;
1631                 }
1632                 leaf = path->nodes[0];
1633                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1634         }
1635 #endif
1636         BUG_ON(item_size < sizeof(*ei));
1637
1638         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1639         flags = btrfs_extent_flags(leaf, ei);
1640
1641         ptr = (unsigned long)(ei + 1);
1642         end = (unsigned long)ei + item_size;
1643
1644         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1645                 ptr += sizeof(struct btrfs_tree_block_info);
1646                 BUG_ON(ptr > end);
1647         }
1648
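	/*
	 * Scan the inline refs that follow the extent item (and, for
	 * non-skinny tree blocks, the btrfs_tree_block_info skipped above).
	 * They are kept in the same sort order as the keyed back ref items
	 * (see the NOTE above the function), so the loop can stop as soon as
	 * it walks past the slot where the wanted ref would live.
	 */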
1649         err = -ENOENT;
1650         while (1) {
1651                 if (ptr >= end) {
1652                         WARN_ON(ptr > end);
1653                         break;
1654                 }
1655                 iref = (struct btrfs_extent_inline_ref *)ptr;
1656                 type = btrfs_extent_inline_ref_type(leaf, iref);
1657                 if (want < type)
1658                         break;
1659                 if (want > type) {
1660                         ptr += btrfs_extent_inline_ref_size(type);
1661                         continue;
1662                 }
1663
1664                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1665                         struct btrfs_extent_data_ref *dref;
1666                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1667                         if (match_extent_data_ref(leaf, dref, root_objectid,
1668                                                   owner, offset)) {
1669                                 err = 0;
1670                                 break;
1671                         }
1672                         if (hash_extent_data_ref_item(leaf, dref) <
1673                             hash_extent_data_ref(root_objectid, owner, offset))
1674                                 break;
1675                 } else {
1676                         u64 ref_offset;
1677                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1678                         if (parent > 0) {
1679                                 if (parent == ref_offset) {
1680                                         err = 0;
1681                                         break;
1682                                 }
1683                                 if (ref_offset < parent)
1684                                         break;
1685                         } else {
1686                                 if (root_objectid == ref_offset) {
1687                                         err = 0;
1688                                         break;
1689                                 }
1690                                 if (ref_offset < root_objectid)
1691                                         break;
1692                         }
1693                 }
1694                 ptr += btrfs_extent_inline_ref_size(type);
1695         }
1696         if (err == -ENOENT && insert) {
1697                 if (item_size + extra_size >=
1698                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1699                         err = -EAGAIN;
1700                         goto out;
1701                 }
1702                 /*
1703                  * To add a new inline back ref, we have to make sure
1704                  * there is no corresponding back ref item.
1705                  * For simplicity, we just do not add a new inline back
1706                  * ref if there is any kind of item for this block.
1707                  */
1708                 if (find_next_key(path, 0, &key) == 0 &&
1709                     key.objectid == bytenr &&
1710                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1711                         err = -EAGAIN;
1712                         goto out;
1713                 }
1714         }
1715         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1716 out:
1717         if (insert) {
1718                 path->keep_locks = 0;
1719                 btrfs_unlock_up_safe(path, 1);
1720         }
1721         return err;
1722 }
1723
1724 /*
1725  * helper to add a new inline back ref
1726  */
1727 static noinline_for_stack
1728 void setup_inline_extent_backref(struct btrfs_root *root,
1729                                  struct btrfs_path *path,
1730                                  struct btrfs_extent_inline_ref *iref,
1731                                  u64 parent, u64 root_objectid,
1732                                  u64 owner, u64 offset, int refs_to_add,
1733                                  struct btrfs_delayed_extent_op *extent_op)
1734 {
1735         struct extent_buffer *leaf;
1736         struct btrfs_extent_item *ei;
1737         unsigned long ptr;
1738         unsigned long end;
1739         unsigned long item_offset;
1740         u64 refs;
1741         int size;
1742         int type;
1743
1744         leaf = path->nodes[0];
1745         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1746         item_offset = (unsigned long)iref - (unsigned long)ei;
1747
1748         type = extent_ref_type(parent, owner);
1749         size = btrfs_extent_inline_ref_size(type);
1750
1751         btrfs_extend_item(root, path, size);
1752
1753         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1754         refs = btrfs_extent_refs(leaf, ei);
1755         refs += refs_to_add;
1756         btrfs_set_extent_refs(leaf, ei, refs);
1757         if (extent_op)
1758                 __run_delayed_extent_op(extent_op, leaf, ei);
1759
1760         ptr = (unsigned long)ei + item_offset;
1761         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1762         if (ptr < end - size)
1763                 memmove_extent_buffer(leaf, ptr + size, ptr,
1764                                       end - size - ptr);
1765
1766         iref = (struct btrfs_extent_inline_ref *)ptr;
1767         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1768         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1769                 struct btrfs_extent_data_ref *dref;
1770                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1771                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1772                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1773                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1774                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1775         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1776                 struct btrfs_shared_data_ref *sref;
1777                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1778                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1779                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1780         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1781                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1782         } else {
1783                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1784         }
1785         btrfs_mark_buffer_dirty(leaf);
1786 }
1787
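/*
 * Find any back ref for the given extent, inline or keyed.  The inline form
 * is tried first; on -ENOENT the path is released and the separate keyed ref
 * items are searched instead, with *ref_ret left NULL so the caller can tell
 * which form was found.
 */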
1788 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1789                                  struct btrfs_root *root,
1790                                  struct btrfs_path *path,
1791                                  struct btrfs_extent_inline_ref **ref_ret,
1792                                  u64 bytenr, u64 num_bytes, u64 parent,
1793                                  u64 root_objectid, u64 owner, u64 offset)
1794 {
1795         int ret;
1796
1797         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1798                                            bytenr, num_bytes, parent,
1799                                            root_objectid, owner, offset, 0);
1800         if (ret != -ENOENT)
1801                 return ret;
1802
1803         btrfs_release_path(path);
1804         *ref_ret = NULL;
1805
1806         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1807                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1808                                             root_objectid);
1809         } else {
1810                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1811                                              root_objectid, owner, offset);
1812         }
1813         return ret;
1814 }
1815
1816 /*
1817  * helper to update/remove an inline back ref
1818  */
1819 static noinline_for_stack
1820 void update_inline_extent_backref(struct btrfs_root *root,
1821                                   struct btrfs_path *path,
1822                                   struct btrfs_extent_inline_ref *iref,
1823                                   int refs_to_mod,
1824                                   struct btrfs_delayed_extent_op *extent_op,
1825                                   int *last_ref)
1826 {
1827         struct extent_buffer *leaf;
1828         struct btrfs_extent_item *ei;
1829         struct btrfs_extent_data_ref *dref = NULL;
1830         struct btrfs_shared_data_ref *sref = NULL;
1831         unsigned long ptr;
1832         unsigned long end;
1833         u32 item_size;
1834         int size;
1835         int type;
1836         u64 refs;
1837
1838         leaf = path->nodes[0];
1839         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1840         refs = btrfs_extent_refs(leaf, ei);
1841         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1842         refs += refs_to_mod;
1843         btrfs_set_extent_refs(leaf, ei, refs);
1844         if (extent_op)
1845                 __run_delayed_extent_op(extent_op, leaf, ei);
1846
1847         type = btrfs_extent_inline_ref_type(leaf, iref);
1848
1849         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1850                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1851                 refs = btrfs_extent_data_ref_count(leaf, dref);
1852         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1853                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1854                 refs = btrfs_shared_data_ref_count(leaf, sref);
1855         } else {
1856                 refs = 1;
1857                 BUG_ON(refs_to_mod != -1);
1858         }
1859
1860         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1861         refs += refs_to_mod;
1862
1863         if (refs > 0) {
1864                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1865                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1866                 else
1867                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1868         } else {
1869                 *last_ref = 1;
1870                 size =  btrfs_extent_inline_ref_size(type);
1871                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1872                 ptr = (unsigned long)iref;
1873                 end = (unsigned long)ei + item_size;
1874                 if (ptr + size < end)
1875                         memmove_extent_buffer(leaf, ptr, ptr + size,
1876                                               end - ptr - size);
1877                 item_size -= size;
1878                 btrfs_truncate_item(root, path, item_size, 1);
1879         }
1880         btrfs_mark_buffer_dirty(leaf);
1881 }
1882
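/*
 * Add refs_to_add references to an extent, preferring the inline form: if an
 * inline ref of the right kind already exists it is updated in place (data
 * refs only; a pre-existing inline ref for a tree block would be a bug),
 * otherwise a new one is set up at the slot the lookup left the path
 * pointing at.  -EAGAIN from the lookup (no room left inline) is passed
 * straight back so the caller can fall back to a keyed back ref.
 */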
1883 static noinline_for_stack
1884 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1885                                  struct btrfs_root *root,
1886                                  struct btrfs_path *path,
1887                                  u64 bytenr, u64 num_bytes, u64 parent,
1888                                  u64 root_objectid, u64 owner,
1889                                  u64 offset, int refs_to_add,
1890                                  struct btrfs_delayed_extent_op *extent_op)
1891 {
1892         struct btrfs_extent_inline_ref *iref;
1893         int ret;
1894
1895         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1896                                            bytenr, num_bytes, parent,
1897                                            root_objectid, owner, offset, 1);
1898         if (ret == 0) {
1899                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1900                 update_inline_extent_backref(root, path, iref,
1901                                              refs_to_add, extent_op, NULL);
1902         } else if (ret == -ENOENT) {
1903                 setup_inline_extent_backref(root, path, iref, parent,
1904                                             root_objectid, owner, offset,
1905                                             refs_to_add, extent_op);
1906                 ret = 0;
1907         }
1908         return ret;
1909 }
1910
1911 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1912                                  struct btrfs_root *root,
1913                                  struct btrfs_path *path,
1914                                  u64 bytenr, u64 parent, u64 root_objectid,
1915                                  u64 owner, u64 offset, int refs_to_add)
1916 {
1917         int ret;
1918         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1919                 BUG_ON(refs_to_add != 1);
1920                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1921                                             parent, root_objectid);
1922         } else {
1923                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1924                                              parent, root_objectid,
1925                                              owner, offset, refs_to_add);
1926         }
1927         return ret;
1928 }
1929
1930 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1931                                  struct btrfs_root *root,
1932                                  struct btrfs_path *path,
1933                                  struct btrfs_extent_inline_ref *iref,
1934                                  int refs_to_drop, int is_data, int *last_ref)
1935 {
1936         int ret = 0;
1937
1938         BUG_ON(!is_data && refs_to_drop != 1);
1939         if (iref) {
1940                 update_inline_extent_backref(root, path, iref,
1941                                              -refs_to_drop, NULL, last_ref);
1942         } else if (is_data) {
1943                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1944                                              last_ref);
1945         } else {
1946                 *last_ref = 1;
1947                 ret = btrfs_del_item(trans, root, path);
1948         }
1949         return ret;
1950 }
1951
1952 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
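/*
 * Issue discards for [start, start + len), carving out any of the fixed
 * superblock mirror locations that fall inside the range so they are never
 * trimmed away; *discarded_bytes reports how many bytes were actually
 * discarded.  The range is trimmed to 512-byte sector alignment first (with
 * a warning if it was not already aligned).
 */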
1953 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1954                                u64 *discarded_bytes)
1955 {
1956         int j, ret = 0;
1957         u64 bytes_left, end;
1958         u64 aligned_start = ALIGN(start, 1 << 9);
1959
1960         if (WARN_ON(start != aligned_start)) {
1961                 len -= aligned_start - start;
1962                 len = round_down(len, 1 << 9);
1963                 start = aligned_start;
1964         }
1965
1966         *discarded_bytes = 0;
1967
1968         if (!len)
1969                 return 0;
1970
1971         end = start + len;
1972         bytes_left = len;
1973
1974         /* Skip any superblocks on this device. */
1975         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1976                 u64 sb_start = btrfs_sb_offset(j);
1977                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1978                 u64 size = sb_start - start;
1979
1980                 if (!in_range(sb_start, start, bytes_left) &&
1981                     !in_range(sb_end, start, bytes_left) &&
1982                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1983                         continue;
1984
1985                 /*
1986                  * Superblock spans beginning of range.  Adjust start and
1987                  * try again.
1988                  */
1989                 if (sb_start <= start) {
1990                         start += sb_end - start;
1991                         if (start > end) {
1992                                 bytes_left = 0;
1993                                 break;
1994                         }
1995                         bytes_left = end - start;
1996                         continue;
1997                 }
1998
1999                 if (size) {
2000                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2001                                                    GFP_NOFS, 0);
2002                         if (!ret)
2003                                 *discarded_bytes += size;
2004                         else if (ret != -EOPNOTSUPP)
2005                                 return ret;
2006                 }
2007
2008                 start = sb_end;
2009                 if (start > end) {
2010                         bytes_left = 0;
2011                         break;
2012                 }
2013                 bytes_left = end - start;
2014         }
2015
2016         if (bytes_left) {
2017                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2018                                            GFP_NOFS, 0);
2019                 if (!ret)
2020                         *discarded_bytes += bytes_left;
2021         }
2022         return ret;
2023 }
2024
2025 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2026                          u64 num_bytes, u64 *actual_bytes)
2027 {
2028         int ret;
2029         u64 discarded_bytes = 0;
2030         struct btrfs_bio *bbio = NULL;
2031
2032
2033         /*
2034          * Avoid races with device replace and make sure our bbio has devices
2035          * associated with its stripes that don't go away while we are discarding.
2036          */
2037         btrfs_bio_counter_inc_blocked(root->fs_info);
2038         /* Tell the block device(s) that the sectors can be discarded */
2039         ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
2040                               bytenr, &num_bytes, &bbio, 0);
2041         /* Error condition is -ENOMEM */
2042         if (!ret) {
2043                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2044                 int i;
2045
2046
2047                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2048                         u64 bytes;
2049                         if (!stripe->dev->can_discard)
2050                                 continue;
2051
2052                         ret = btrfs_issue_discard(stripe->dev->bdev,
2053                                                   stripe->physical,
2054                                                   stripe->length,
2055                                                   &bytes);
2056                         if (!ret)
2057                                 discarded_bytes += bytes;
2058                         else if (ret != -EOPNOTSUPP)
2059                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2060
2061                         /*
2062                          * In case we get back EOPNOTSUPP for some reason,
2063                          * just ignore the return value so we don't screw up
2064                          * people calling discard_extent.
2065                          */
2066                         ret = 0;
2067                 }
2068                 btrfs_put_bbio(bbio);
2069         }
2070         btrfs_bio_counter_dec(root->fs_info);
2071
2072         if (actual_bytes)
2073                 *actual_bytes = discarded_bytes;
2074
2075
2076         if (ret == -EOPNOTSUPP)
2077                 ret = 0;
2078         return ret;
2079 }
2080
2081 /* Can return -ENOMEM */
2082 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2083                          struct btrfs_root *root,
2084                          u64 bytenr, u64 num_bytes, u64 parent,
2085                          u64 root_objectid, u64 owner, u64 offset)
2086 {
2087         int ret;
2088         struct btrfs_fs_info *fs_info = root->fs_info;
2089
2090         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2091                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2092
2093         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2094                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2095                                         num_bytes,
2096                                         parent, root_objectid, (int)owner,
2097                                         BTRFS_ADD_DELAYED_REF, NULL);
2098         } else {
2099                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2100                                         num_bytes, parent, root_objectid,
2101                                         owner, offset, 0,
2102                                         BTRFS_ADD_DELAYED_REF, NULL);
2103         }
2104         return ret;
2105 }
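
/*
 * Example (a sketch with made-up values, not taken from a real caller):
 * adding one more reference to a data extent owned by inode 257 in the
 * caller's own root, with no shared parent block, would look roughly like
 *
 *	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *				   root->root_key.objectid, 257, file_offset);
 *
 * which only queues a delayed data ref; the extent tree itself is updated
 * later when the delayed refs are run.
 */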
2106
2107 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2108                                   struct btrfs_root *root,
2109                                   struct btrfs_delayed_ref_node *node,
2110                                   u64 parent, u64 root_objectid,
2111                                   u64 owner, u64 offset, int refs_to_add,
2112                                   struct btrfs_delayed_extent_op *extent_op)
2113 {
2114         struct btrfs_fs_info *fs_info = root->fs_info;
2115         struct btrfs_path *path;
2116         struct extent_buffer *leaf;
2117         struct btrfs_extent_item *item;
2118         struct btrfs_key key;
2119         u64 bytenr = node->bytenr;
2120         u64 num_bytes = node->num_bytes;
2121         u64 refs;
2122         int ret;
2123
2124         path = btrfs_alloc_path();
2125         if (!path)
2126                 return -ENOMEM;
2127
2128         path->reada = READA_FORWARD;
2129         path->leave_spinning = 1;
2130         /* this will set up the path even if it fails to insert the back ref */
2131         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2132                                            bytenr, num_bytes, parent,
2133                                            root_objectid, owner, offset,
2134                                            refs_to_add, extent_op);
2135         if ((ret < 0 && ret != -EAGAIN) || !ret)
2136                 goto out;
2137
2138         /*
2139          * Ok, we had -EAGAIN, which means we didn't have space to insert an
2140          * inline extent ref, so just update the reference count and add a
2141          * normal backref.
2142          */
2143         leaf = path->nodes[0];
2144         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2145         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2146         refs = btrfs_extent_refs(leaf, item);
2147         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2148         if (extent_op)
2149                 __run_delayed_extent_op(extent_op, leaf, item);
2150
2151         btrfs_mark_buffer_dirty(leaf);
2152         btrfs_release_path(path);
2153
2154         path->reada = READA_FORWARD;
2155         path->leave_spinning = 1;
2156         /* now insert the actual backref */
2157         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2158                                     path, bytenr, parent, root_objectid,
2159                                     owner, offset, refs_to_add);
2160         if (ret)
2161                 btrfs_abort_transaction(trans, ret);
2162 out:
2163         btrfs_free_path(path);
2164         return ret;
2165 }
2166
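/*
 * Apply one delayed data ref to the extent tree.  BTRFS_ADD_DELAYED_REF with
 * insert_reserved set means the extent item itself still has to be created
 * (alloc_reserved_file_extent()); otherwise an ADD bumps the reference count
 * via __btrfs_inc_extent_ref() and a DROP releases it via
 * __btrfs_free_extent().
 */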
2167 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2168                                 struct btrfs_root *root,
2169                                 struct btrfs_delayed_ref_node *node,
2170                                 struct btrfs_delayed_extent_op *extent_op,
2171                                 int insert_reserved)
2172 {
2173         int ret = 0;
2174         struct btrfs_delayed_data_ref *ref;
2175         struct btrfs_key ins;
2176         u64 parent = 0;
2177         u64 ref_root = 0;
2178         u64 flags = 0;
2179
2180         ins.objectid = node->bytenr;
2181         ins.offset = node->num_bytes;
2182         ins.type = BTRFS_EXTENT_ITEM_KEY;
2183
2184         ref = btrfs_delayed_node_to_data_ref(node);
2185         trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
2186
2187         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2188                 parent = ref->parent;
2189         ref_root = ref->root;
2190
2191         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2192                 if (extent_op)
2193                         flags |= extent_op->flags_to_set;
2194                 ret = alloc_reserved_file_extent(trans, root,
2195                                                  parent, ref_root, flags,
2196                                                  ref->objectid, ref->offset,
2197                                                  &ins, node->ref_mod);
2198         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2199                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2200                                              ref_root, ref->objectid,
2201                                              ref->offset, node->ref_mod,
2202                                              extent_op);
2203         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2204                 ret = __btrfs_free_extent(trans, root, node, parent,
2205                                           ref_root, ref->objectid,
2206                                           ref->offset, node->ref_mod,
2207                                           extent_op);
2208         } else {
2209                 BUG();
2210         }
2211         return ret;
2212 }
2213
2214 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2215                                     struct extent_buffer *leaf,
2216                                     struct btrfs_extent_item *ei)
2217 {
2218         u64 flags = btrfs_extent_flags(leaf, ei);
2219         if (extent_op->update_flags) {
2220                 flags |= extent_op->flags_to_set;
2221                 btrfs_set_extent_flags(leaf, ei, flags);
2222         }
2223
2224         if (extent_op->update_key) {
2225                 struct btrfs_tree_block_info *bi;
2226                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2227                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2228                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2229         }
2230 }
2231
2232 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2233                                  struct btrfs_root *root,
2234                                  struct btrfs_delayed_ref_node *node,
2235                                  struct btrfs_delayed_extent_op *extent_op)
2236 {
2237         struct btrfs_key key;
2238         struct btrfs_path *path;
2239         struct btrfs_extent_item *ei;
2240         struct extent_buffer *leaf;
2241         u32 item_size;
2242         int ret;
2243         int err = 0;
2244         int metadata = !extent_op->is_data;
2245
2246         if (trans->aborted)
2247                 return 0;
2248
2249         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2250                 metadata = 0;
2251
2252         path = btrfs_alloc_path();
2253         if (!path)
2254                 return -ENOMEM;
2255
2256         key.objectid = node->bytenr;
2257
2258         if (metadata) {
2259                 key.type = BTRFS_METADATA_ITEM_KEY;
2260                 key.offset = extent_op->level;
2261         } else {
2262                 key.type = BTRFS_EXTENT_ITEM_KEY;
2263                 key.offset = node->num_bytes;
2264         }
2265
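	/*
	 * Skinny-metadata filesystems normally record tree block extents as
	 * METADATA_ITEMs keyed by level, but a converted filesystem may still
	 * carry the old fat EXTENT_ITEM keyed by num_bytes; if the first
	 * search misses we retry below with the other key.
	 */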
2266 again:
2267         path->reada = READA_FORWARD;
2268         path->leave_spinning = 1;
2269         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2270                                 path, 0, 1);
2271         if (ret < 0) {
2272                 err = ret;
2273                 goto out;
2274         }
2275         if (ret > 0) {
2276                 if (metadata) {
2277                         if (path->slots[0] > 0) {
2278                                 path->slots[0]--;
2279                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2280                                                       path->slots[0]);
2281                                 if (key.objectid == node->bytenr &&
2282                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2283                                     key.offset == node->num_bytes)
2284                                         ret = 0;
2285                         }
2286                         if (ret > 0) {
2287                                 btrfs_release_path(path);
2288                                 metadata = 0;
2289
2290                                 key.objectid = node->bytenr;
2291                                 key.offset = node->num_bytes;
2292                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2293                                 goto again;
2294                         }
2295                 } else {
2296                         err = -EIO;
2297                         goto out;
2298                 }
2299         }
2300
2301         leaf = path->nodes[0];
2302         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2303 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2304         if (item_size < sizeof(*ei)) {
2305                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2306                                              path, (u64)-1, 0);
2307                 if (ret < 0) {
2308                         err = ret;
2309                         goto out;
2310                 }
2311                 leaf = path->nodes[0];
2312                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2313         }
2314 #endif
2315         BUG_ON(item_size < sizeof(*ei));
2316         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2317         __run_delayed_extent_op(extent_op, leaf, ei);
2318
2319         btrfs_mark_buffer_dirty(leaf);
2320 out:
2321         btrfs_free_path(path);
2322         return err;
2323 }
2324
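/*
 * Apply one delayed tree block ref.  A tree block delayed ref must have a
 * ref_mod of exactly 1 (anything else is treated as corruption), and with
 * skinny metadata the insertion key carries the block level instead of its
 * byte size.
 */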
2325 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2326                                 struct btrfs_root *root,
2327                                 struct btrfs_delayed_ref_node *node,
2328                                 struct btrfs_delayed_extent_op *extent_op,
2329                                 int insert_reserved)
2330 {
2331         int ret = 0;
2332         struct btrfs_delayed_tree_ref *ref;
2333         struct btrfs_key ins;
2334         u64 parent = 0;
2335         u64 ref_root = 0;
2336         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2337                                                  SKINNY_METADATA);
2338
2339         ref = btrfs_delayed_node_to_tree_ref(node);
2340         trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
2341
2342         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2343                 parent = ref->parent;
2344         ref_root = ref->root;
2345
2346         ins.objectid = node->bytenr;
2347         if (skinny_metadata) {
2348                 ins.offset = ref->level;
2349                 ins.type = BTRFS_METADATA_ITEM_KEY;
2350         } else {
2351                 ins.offset = node->num_bytes;
2352                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2353         }
2354
2355         if (node->ref_mod != 1) {
2356                 btrfs_err(root->fs_info,
2357         "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2358                           node->bytenr, node->ref_mod, node->action, ref_root,
2359                           parent);
2360                 return -EIO;
2361         }
2362         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2363                 BUG_ON(!extent_op || !extent_op->update_flags);
2364                 ret = alloc_reserved_tree_block(trans, root,
2365                                                 parent, ref_root,
2366                                                 extent_op->flags_to_set,
2367                                                 &extent_op->key,
2368                                                 ref->level, &ins);
2369         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2370                 ret = __btrfs_inc_extent_ref(trans, root, node,
2371                                              parent, ref_root,
2372                                              ref->level, 0, 1,
2373                                              extent_op);
2374         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2375                 ret = __btrfs_free_extent(trans, root, node,
2376                                           parent, ref_root,
2377                                           ref->level, 0, 1, extent_op);
2378         } else {
2379                 BUG();
2380         }
2381         return ret;
2382 }
2383
2384 /* helper function to actually process a single delayed ref entry */
2385 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2386                                struct btrfs_root *root,
2387                                struct btrfs_delayed_ref_node *node,
2388                                struct btrfs_delayed_extent_op *extent_op,
2389                                int insert_reserved)
2390 {
2391         int ret = 0;
2392
2393         if (trans->aborted) {
2394                 if (insert_reserved)
2395                         btrfs_pin_extent(root, node->bytenr,
2396                                          node->num_bytes, 1);
2397                 return 0;
2398         }
2399
2400         if (btrfs_delayed_ref_is_head(node)) {
2401                 struct btrfs_delayed_ref_head *head;
2402                 /*
2403                  * We've hit the end of the chain and we were supposed
2404                  * to insert this extent into the tree.  But it got
2405                  * deleted before we ever needed to insert it, so all
2406                  * we have to do is clean up the accounting.
2407                  */
2408                 BUG_ON(extent_op);
2409                 head = btrfs_delayed_node_to_head(node);
2410                 trace_run_delayed_ref_head(root->fs_info, node, head,
2411                                            node->action);
2412
2413                 if (insert_reserved) {
2414                         btrfs_pin_extent(root, node->bytenr,
2415                                          node->num_bytes, 1);
2416                         if (head->is_data) {
2417                                 ret = btrfs_del_csums(trans, root,
2418                                                       node->bytenr,
2419                                                       node->num_bytes);
2420                         }
2421                 }
2422
2423                 /* Also free its reserved qgroup space */
2424                 btrfs_qgroup_free_delayed_ref(root->fs_info,
2425                                               head->qgroup_ref_root,
2426                                               head->qgroup_reserved);
2427                 return ret;
2428         }
2429
2430         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2431             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2432                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2433                                            insert_reserved);
2434         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2435                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2436                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2437                                            insert_reserved);
2438         else
2439                 BUG();
2440         return ret;
2441 }
2442
2443 static inline struct btrfs_delayed_ref_node *
2444 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2445 {
2446         struct btrfs_delayed_ref_node *ref;
2447
2448         if (list_empty(&head->ref_list))
2449                 return NULL;
2450
2451         /*
2452          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2453          * This is to prevent a ref count from going down to zero, which deletes
2454          * the extent item from the extent tree, when there still are references
2455          * to add, which would fail because they would not find the extent item.
2456          */
2457         list_for_each_entry(ref, &head->ref_list, list) {
2458                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2459                         return ref;
2460         }
2461
2462         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2463                           list);
2464 }
2465
2466 /*
2467  * Returns 0 on success or if called with an already aborted transaction.
2468  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2469  */
2470 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2471                                              struct btrfs_root *root,
2472                                              unsigned long nr)
2473 {
2474         struct btrfs_delayed_ref_root *delayed_refs;
2475         struct btrfs_delayed_ref_node *ref;
2476         struct btrfs_delayed_ref_head *locked_ref = NULL;
2477         struct btrfs_delayed_extent_op *extent_op;
2478         struct btrfs_fs_info *fs_info = root->fs_info;
2479         ktime_t start = ktime_get();
2480         int ret;
2481         unsigned long count = 0;
2482         unsigned long actual_count = 0;
2483         int must_insert_reserved = 0;
2484
2485         delayed_refs = &trans->transaction->delayed_refs;
2486         while (1) {
2487                 if (!locked_ref) {
2488                         if (count >= nr)
2489                                 break;
2490
2491                         spin_lock(&delayed_refs->lock);
2492                         locked_ref = btrfs_select_ref_head(trans);
2493                         if (!locked_ref) {
2494                                 spin_unlock(&delayed_refs->lock);
2495                                 break;
2496                         }
2497
2498                         /* grab the lock that says we are going to process
2499                          * all the refs for this head */
2500                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2501                         spin_unlock(&delayed_refs->lock);
2502                         /*
2503                          * we may have dropped the spin lock to get the head
2504                          * mutex lock, and that might have given someone else
2505                          * time to free the head.  If that's true, it has been
2506                          * removed from our list and we can move on.
2507                          */
2508                         if (ret == -EAGAIN) {
2509                                 locked_ref = NULL;
2510                                 count++;
2511                                 continue;
2512                         }
2513                 }
2514
2515                 /*
2516                  * We need to try and merge add/drops of the same ref since we
2517                  * can run into issues with relocate dropping the implicit ref
2518                  * and then it being added back again before the drop can
2519                  * finish.  If we merged anything we need to re-loop so we can
2520                  * get a good ref.
2521                  * Or we can get node references of the same type that weren't
2522                  * merged when created due to bumps in the tree mod seq, and
2523                  * we need to merge them to prevent adding an inline extent
2524                  * backref before dropping it (triggering a BUG_ON at
2525                  * insert_inline_extent_backref()).
2526                  */
2527                 spin_lock(&locked_ref->lock);
2528                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2529                                          locked_ref);
2530
2531                 /*
2532                  * locked_ref is the head node, so we have to go one
2533                  * node back for any delayed ref updates
2534                  */
2535                 ref = select_delayed_ref(locked_ref);
2536
2537                 if (ref && ref->seq &&
2538                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2539                         spin_unlock(&locked_ref->lock);
2540                         btrfs_delayed_ref_unlock(locked_ref);
2541                         spin_lock(&delayed_refs->lock);
2542                         locked_ref->processing = 0;
2543                         delayed_refs->num_heads_ready++;
2544                         spin_unlock(&delayed_refs->lock);
2545                         locked_ref = NULL;
2546                         cond_resched();
2547                         count++;
2548                         continue;
2549                 }
2550
2551                 /*
2552                  * record the must insert reserved flag before we
2553                  * drop the spin lock.
2554                  */
2555                 must_insert_reserved = locked_ref->must_insert_reserved;
2556                 locked_ref->must_insert_reserved = 0;
2557
2558                 extent_op = locked_ref->extent_op;
2559                 locked_ref->extent_op = NULL;
2560
2561                 if (!ref) {
2562
2563
2564                         /* All delayed refs have been processed, go ahead
2565                          * and send the head node to run_one_delayed_ref,
2566                          * so that any accounting fixes can happen.
2567                          */
2568                         ref = &locked_ref->node;
2569
2570                         if (extent_op && must_insert_reserved) {
2571                                 btrfs_free_delayed_extent_op(extent_op);
2572                                 extent_op = NULL;
2573                         }
2574
2575                         if (extent_op) {
2576                                 spin_unlock(&locked_ref->lock);
2577                                 ret = run_delayed_extent_op(trans, root,
2578                                                             ref, extent_op);
2579                                 btrfs_free_delayed_extent_op(extent_op);
2580
2581                                 if (ret) {
2582                                         /*
2583                                          * Need to reset must_insert_reserved if
2584                                          * there was an error so the abort stuff
2585                                          * can clean up the reserved space
2586                                          * properly.
2587                                          */
2588                                         if (must_insert_reserved)
2589                                                 locked_ref->must_insert_reserved = 1;
2590                                         locked_ref->processing = 0;
2591                                         btrfs_debug(fs_info,
2592                                                     "run_delayed_extent_op returned %d",
2593                                                     ret);
2594                                         btrfs_delayed_ref_unlock(locked_ref);
2595                                         return ret;
2596                                 }
2597                                 continue;
2598                         }
2599
2600                         /*
2601                          * Need to drop our head ref lock and re-acquire the
2602                          * delayed ref lock and then re-check to make sure
2603                          * nobody got added.
2604                          */
2605                         spin_unlock(&locked_ref->lock);
2606                         spin_lock(&delayed_refs->lock);
2607                         spin_lock(&locked_ref->lock);
2608                         if (!list_empty(&locked_ref->ref_list) ||
2609                             locked_ref->extent_op) {
2610                                 spin_unlock(&locked_ref->lock);
2611                                 spin_unlock(&delayed_refs->lock);
2612                                 continue;
2613                         }
2614                         ref->in_tree = 0;
2615                         delayed_refs->num_heads--;
2616                         rb_erase(&locked_ref->href_node,
2617                                  &delayed_refs->href_root);
2618                         spin_unlock(&delayed_refs->lock);
2619                 } else {
2620                         actual_count++;
2621                         ref->in_tree = 0;
2622                         list_del(&ref->list);
2623                 }
2624                 atomic_dec(&delayed_refs->num_entries);
2625
2626                 if (!btrfs_delayed_ref_is_head(ref)) {
2627                         /*
2628                          * When we run the delayed ref, also correct the
2629                          * ref_mod on the head.
2630                          */
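                        /*
                         * Concretely, running an ADD ref removes its positive
                         * contribution from the head's aggregate ref_mod and
                         * running a DROP removes its negative one, so the
                         * head's ref_mod keeps tracking only the refs that
                         * are still queued.
                         */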
2631                         switch (ref->action) {
2632                         case BTRFS_ADD_DELAYED_REF:
2633                         case BTRFS_ADD_DELAYED_EXTENT:
2634                                 locked_ref->node.ref_mod -= ref->ref_mod;
2635                                 break;
2636                         case BTRFS_DROP_DELAYED_REF:
2637                                 locked_ref->node.ref_mod += ref->ref_mod;
2638                                 break;
2639                         default:
2640                                 WARN_ON(1);
2641                         }
2642                 }
2643                 spin_unlock(&locked_ref->lock);
2644
2645                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2646                                           must_insert_reserved);
2647
2648                 btrfs_free_delayed_extent_op(extent_op);
2649                 if (ret) {
2650                         locked_ref->processing = 0;
2651                         btrfs_delayed_ref_unlock(locked_ref);
2652                         btrfs_put_delayed_ref(ref);
2653                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2654                                     ret);
2655                         return ret;
2656                 }
2657
2658                 /*
2659                  * If this node is a head, that means all the refs in this head
2660                  * have been dealt with, and we will pick the next head to deal
2661                  * with, so we must unlock the head and drop it from the cluster
2662                  * list before we release it.
2663                  */
2664                 if (btrfs_delayed_ref_is_head(ref)) {
2665                         if (locked_ref->is_data &&
2666                             locked_ref->total_ref_mod < 0) {
2667                                 spin_lock(&delayed_refs->lock);
2668                                 delayed_refs->pending_csums -= ref->num_bytes;
2669                                 spin_unlock(&delayed_refs->lock);
2670                         }
2671                         btrfs_delayed_ref_unlock(locked_ref);
2672                         locked_ref = NULL;
2673                 }
2674                 btrfs_put_delayed_ref(ref);
2675                 count++;
2676                 cond_resched();
2677         }
2678
2679         /*
2680          * We don't count ref heads here since we can have empty ref heads,
2681          * and those would drastically skew our runtime down because they only
2682          * involve accounting, not actual extent tree updates.
2683          */
2684         if (actual_count > 0) {
2685                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2686                 u64 avg;
2687
2688                 /*
2689                  * We weigh the current average higher than our current runtime
2690                  * to avoid large swings in the average.
2691                  */
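                /*
                 * For example, if avg_delayed_ref_runtime is 100000 ns and
                 * this batch took 20000 ns, the new average becomes
                 * (3 * 100000 + 20000) / 4 = 80000 ns.
                 */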
2692                 spin_lock(&delayed_refs->lock);
2693                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2694                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2695                 spin_unlock(&delayed_refs->lock);
2696         }
2697         return 0;
2698 }
2699
2700 #ifdef SCRAMBLE_DELAYED_REFS
2701 /*
2702  * Normally delayed refs get processed in ascending bytenr order, which in
2703  * most cases matches the order in which they were added.  To expose dependencies
2704  * on this order, we start processing the tree in the middle instead of the beginning.
2705  */
2706 static u64 find_middle(struct rb_root *root)
2707 {
2708         struct rb_node *n = root->rb_node;
2709         struct btrfs_delayed_ref_node *entry;
2710         int alt = 1;
2711         u64 middle;
2712         u64 first = 0, last = 0;
2713
2714         n = rb_first(root);
2715         if (n) {
2716                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2717                 first = entry->bytenr;
2718         }
2719         n = rb_last(root);
2720         if (n) {
2721                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2722                 last = entry->bytenr;
2723         }
2724         n = root->rb_node;
2725
2726         while (n) {
2727                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2728                 WARN_ON(!entry->in_tree);
2729
2730                 middle = entry->bytenr;
2731
2732                 if (alt)
2733                         n = n->rb_left;
2734                 else
2735                         n = n->rb_right;
2736
2737                 alt = 1 - alt;
2738         }
2739         return middle;
2740 }
2741 #endif
2742
2743 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2744 {
2745         u64 num_bytes;
2746
2747         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2748                              sizeof(struct btrfs_extent_inline_ref));
2749         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2750                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2751
2752         /*
2753          * We don't ever fill up leaves all the way, so the caller doubles this
2754          * estimate to get closer to what we're really going to end up using.
2755          */
2756         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2757 }
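/*
 * Rough illustration (assuming SKINNY_METADATA, so no tree_block_info is
 * added): each head is charged sizeof(btrfs_extent_item) +
 * sizeof(btrfs_extent_inline_ref), roughly a few dozen bytes, so with a
 * 16KiB nodesize very roughly a few hundred heads are assumed per leaf.
 */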
2758
2759 /*
2760  * Takes the number of bytes to be checksummed and figures out how many leaves it
2761  * would require to store the csums for that many bytes.
2762  */
2763 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2764 {
2765         u64 csum_size;
2766         u64 num_csums_per_leaf;
2767         u64 num_csums;
2768
2769         csum_size = BTRFS_MAX_ITEM_SIZE(root);
2770         num_csums_per_leaf = div64_u64(csum_size,
2771                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2772         num_csums = div64_u64(csum_bytes, root->sectorsize);
2773         num_csums += num_csums_per_leaf - 1;
2774         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2775         return num_csums;
2776 }
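/*
 * Rough example: with 4KiB sectors and 4-byte crc32c checksums, one leaf's
 * worth of csum items holds about csum_size / 4 checksums, each covering one
 * sector, so a single csum leaf covers very roughly 16MiB of data on a
 * filesystem with a 16KiB nodesize.
 */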
2777
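/*
 * Estimate whether the global block reserve still comfortably covers the
 * metadata cost of running the currently queued delayed refs plus writing
 * out the dirty block groups.  Returns 1 if space looks too tight, 0
 * otherwise.
 */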
2778 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2779                                        struct btrfs_root *root)
2780 {
2781         struct btrfs_block_rsv *global_rsv;
2782         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2783         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2784         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2785         u64 num_bytes, num_dirty_bgs_bytes;
2786         int ret = 0;
2787
2788         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2789         num_heads = heads_to_leaves(root, num_heads);
2790         if (num_heads > 1)
2791                 num_bytes += (num_heads - 1) * root->nodesize;
2792         num_bytes <<= 1;
2793         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2794         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2795                                                              num_dirty_bgs);
2796         global_rsv = &root->fs_info->global_block_rsv;
2797
2798         /*
2799          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2800          * wiggle room since running delayed refs can create more delayed refs.
2801          */
2802         if (global_rsv->space_info->full) {
2803                 num_dirty_bgs_bytes <<= 1;
2804                 num_bytes <<= 1;
2805         }
2806
2807         spin_lock(&global_rsv->lock);
2808         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2809                 ret = 1;
2810         spin_unlock(&global_rsv->lock);
2811         return ret;
2812 }
2813
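/*
 * Heuristic used to decide whether callers should throttle and help run
 * delayed refs: with num_entries queued entries and an average cost of
 * avg_delayed_ref_runtime ns each, the estimated flush time is
 * num_entries * avg_runtime.  Returns 1 once that estimate reaches one
 * second, 2 once it reaches half a second, and otherwise falls back to the
 * global reserve space check above.
 */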
2814 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2815                                        struct btrfs_root *root)
2816 {
2817         struct btrfs_fs_info *fs_info = root->fs_info;
2818         u64 num_entries =
2819                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2820         u64 avg_runtime;
2821         u64 val;
2822
2823         smp_mb();
2824         avg_runtime = fs_info->avg_delayed_ref_runtime;
2825         val = num_entries * avg_runtime;
2826         if (val >= NSEC_PER_SEC)
2827                 return 1;
2828         if (val >= NSEC_PER_SEC / 2)
2829                 return 2;
2830
2831         return btrfs_check_space_for_delayed_refs(trans, root);
2832 }
2833
2834 struct async_delayed_refs {
2835         struct btrfs_root *root;
2836         u64 transid;
2837         int count;
2838         int error;
2839         int sync;
2840         struct completion wait;
2841         struct btrfs_work work;
2842 };
2843
2844 static void delayed_ref_async_start(struct btrfs_work *work)
2845 {
2846         struct async_delayed_refs *async;
2847         struct btrfs_trans_handle *trans;
2848         int ret;
2849
2850         async = container_of(work, struct async_delayed_refs, work);
2851
2852         /* if the commit is already started, we don't need to wait here */
2853         if (btrfs_transaction_blocked(async->root->fs_info))
2854                 goto done;
2855
2856         trans = btrfs_join_transaction(async->root);
2857         if (IS_ERR(trans)) {
2858                 async->error = PTR_ERR(trans);
2859                 goto done;
2860         }
2861
2862         /*
2863          * trans->sync means that when we call end_transaction, we won't
2864          * wait on delayed refs
2865          */
2866         trans->sync = true;
2867
2868         /* Don't bother flushing if we got into a different transaction */
2869         if (trans->transid > async->transid)
2870                 goto end;
2871
2872         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2873         if (ret)
2874                 async->error = ret;
2875 end:
2876         ret = btrfs_end_transaction(trans, async->root);
2877         if (ret && !async->error)
2878                 async->error = ret;
2879 done:
2880         if (async->sync)
2881                 complete(&async->wait);
2882         else
2883                 kfree(async);
2884 }
2885
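/*
 * Queue a delayed ref run on the extent_workers workqueue.  When @wait is
 * set, the caller sleeps on the completion and gets the worker's error code
 * back; otherwise 0 is returned immediately and the async struct is freed by
 * the worker when it finishes.
 */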
2886 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2887                                  unsigned long count, u64 transid, int wait)
2888 {
2889         struct async_delayed_refs *async;
2890         int ret;
2891
2892         async = kmalloc(sizeof(*async), GFP_NOFS);
2893         if (!async)
2894                 return -ENOMEM;
2895
2896         async->root = root->fs_info->tree_root;
2897         async->count = count;
2898         async->error = 0;
2899         async->transid = transid;
2900         if (wait)
2901                 async->sync = 1;
2902         else
2903                 async->sync = 0;
2904         init_completion(&async->wait);
2905
2906         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2907                         delayed_ref_async_start, NULL, NULL);
2908
2909         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2910
2911         if (wait) {
2912                 wait_for_completion(&async->wait);
2913                 ret = async->error;
2914                 kfree(async);
2915                 return ret;
2916         }
2917         return 0;
2918 }
2919
2920 /*
2921  * This starts processing the delayed reference count updates and
2922  * extent insertions we have queued up so far.  count can be
2923  * 0, which means to process everything in the tree at the start
2924  * of the run (but not newly added entries), or it can be some target
2925  * number you'd like to process.
2926  *
2927  * Returns 0 on success or if called with an aborted transaction
2928  * Returns <0 on error and aborts the transaction
2929  */
2930 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2931                            struct btrfs_root *root, unsigned long count)
2932 {
2933         struct rb_node *node;
2934         struct btrfs_delayed_ref_root *delayed_refs;
2935         struct btrfs_delayed_ref_head *head;
2936         int ret;
2937         int run_all = count == (unsigned long)-1;
2938         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2939
2940         /* We'll clean this up in btrfs_cleanup_transaction */
2941         if (trans->aborted)
2942                 return 0;
2943
2944         if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
2945                 return 0;
2946
2947         if (root == root->fs_info->extent_root)
2948                 root = root->fs_info->tree_root;
2949
2950         delayed_refs = &trans->transaction->delayed_refs;
2951         if (count == 0)
2952                 count = atomic_read(&delayed_refs->num_entries) * 2;
2953
2954 again:
2955 #ifdef SCRAMBLE_DELAYED_REFS
2956         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2957 #endif
2958         trans->can_flush_pending_bgs = false;
2959         ret = __btrfs_run_delayed_refs(trans, root, count);
2960         if (ret < 0) {
2961                 btrfs_abort_transaction(trans, ret);
2962                 return ret;
2963         }
2964
2965         if (run_all) {
2966                 if (!list_empty(&trans->new_bgs))
2967                         btrfs_create_pending_block_groups(trans, root);
2968
2969                 spin_lock(&delayed_refs->lock);
2970                 node = rb_first(&delayed_refs->href_root);
2971                 if (!node) {
2972                         spin_unlock(&delayed_refs->lock);
2973                         goto out;
2974                 }
2975
2976                 while (node) {
2977                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2978                                         href_node);
2979                         if (btrfs_delayed_ref_is_head(&head->node)) {
2980                                 struct btrfs_delayed_ref_node *ref;
2981
2982                                 ref = &head->node;
2983                                 atomic_inc(&ref->refs);
2984
2985                                 spin_unlock(&delayed_refs->lock);
2986                                 /*
2987                                  * Mutex was contended, block until it's
2988                                  * released and try again
2989                                  */
2990                                 mutex_lock(&head->mutex);
2991                                 mutex_unlock(&head->mutex);
2992
2993                                 btrfs_put_delayed_ref(ref);
2994                                 cond_resched();
2995                                 goto again;
2996                         } else {
2997                                 WARN_ON(1);
2998                         }
2999                         node = rb_next(node);
3000                 }
3001                 spin_unlock(&delayed_refs->lock);
3002                 cond_resched();
3003                 goto again;
3004         }
3005 out:
3006         assert_qgroups_uptodate(trans);
3007         trans->can_flush_pending_bgs = can_flush_pending_bgs;
3008         return 0;
3009 }
3010
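/*
 * Queue a delayed extent op that only updates the extent flags (no key
 * update) for the extent at bytenr; the on-disk extent item is modified
 * later, when the delayed refs are run.
 */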
3011 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3012                                 struct btrfs_root *root,
3013                                 u64 bytenr, u64 num_bytes, u64 flags,
3014                                 int level, int is_data)
3015 {
3016         struct btrfs_delayed_extent_op *extent_op;
3017         int ret;
3018
3019         extent_op = btrfs_alloc_delayed_extent_op();
3020         if (!extent_op)
3021                 return -ENOMEM;
3022
3023         extent_op->flags_to_set = flags;
3024         extent_op->update_flags = true;
3025         extent_op->update_key = false;
3026         extent_op->is_data = is_data ? true : false;
3027         extent_op->level = level;
3028
3029         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3030                                           num_bytes, extent_op);
3031         if (ret)
3032                 btrfs_free_delayed_extent_op(extent_op);
3033         return ret;
3034 }
3035
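/*
 * Scan the still-queued delayed refs for bytenr.  Returns 1 if any ref other
 * than our own (root, objectid, offset) data ref is queued, i.e. a cross
 * reference exists, 0 if nothing conflicting is queued, and -EAGAIN if the
 * head mutex was contended and the caller should retry.
 */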
3036 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3037                                       struct btrfs_root *root,
3038                                       struct btrfs_path *path,
3039                                       u64 objectid, u64 offset, u64 bytenr)
3040 {
3041         struct btrfs_delayed_ref_head *head;
3042         struct btrfs_delayed_ref_node *ref;
3043         struct btrfs_delayed_data_ref *data_ref;
3044         struct btrfs_delayed_ref_root *delayed_refs;
3045         int ret = 0;
3046
3047         delayed_refs = &trans->transaction->delayed_refs;
3048         spin_lock(&delayed_refs->lock);
3049         head = btrfs_find_delayed_ref_head(trans, bytenr);
3050         if (!head) {
3051                 spin_unlock(&delayed_refs->lock);
3052                 return 0;
3053         }
3054
3055         if (!mutex_trylock(&head->mutex)) {
3056                 atomic_inc(&head->node.refs);
3057                 spin_unlock(&delayed_refs->lock);
3058
3059                 btrfs_release_path(path);
3060
3061                 /*
3062                  * Mutex was contended, block until it's released and let
3063                  * caller try again
3064                  */
3065                 mutex_lock(&head->mutex);
3066                 mutex_unlock(&head->mutex);
3067                 btrfs_put_delayed_ref(&head->node);
3068                 return -EAGAIN;
3069         }
3070         spin_unlock(&delayed_refs->lock);
3071
3072         spin_lock(&head->lock);
3073         list_for_each_entry(ref, &head->ref_list, list) {
3074                 /* If it's a shared ref we know a cross reference exists */
3075                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3076                         ret = 1;
3077                         break;
3078                 }
3079
3080                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3081
3082                 /*
3083                  * If our ref doesn't match the one we're currently looking at
3084                  * then we have a cross reference.
3085                  */
3086                 if (data_ref->root != root->root_key.objectid ||
3087                     data_ref->objectid != objectid ||
3088                     data_ref->offset != offset) {
3089                         ret = 1;
3090                         break;
3091                 }
3092         }
3093         spin_unlock(&head->lock);
3094         mutex_unlock(&head->mutex);
3095         return ret;
3096 }
3097
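/*
 * Check the committed extent tree for bytenr.  Returns 0 when no cross
 * reference is possible (a single inline data ref that is our own, on an
 * extent newer than the root's last snapshot), 1 when a cross reference may
 * exist, and -ENOENT when no matching extent item is found.
 */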
3098 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3099                                         struct btrfs_root *root,
3100                                         struct btrfs_path *path,
3101                                         u64 objectid, u64 offset, u64 bytenr)
3102 {
3103         struct btrfs_root *extent_root = root->fs_info->extent_root;
3104         struct extent_buffer *leaf;
3105         struct btrfs_extent_data_ref *ref;
3106         struct btrfs_extent_inline_ref *iref;
3107         struct btrfs_extent_item *ei;
3108         struct btrfs_key key;
3109         u32 item_size;
3110         int ret;
3111
3112         key.objectid = bytenr;
3113         key.offset = (u64)-1;
3114         key.type = BTRFS_EXTENT_ITEM_KEY;
3115
3116         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3117         if (ret < 0)
3118                 goto out;
3119         BUG_ON(ret == 0); /* Corruption */
3120
3121         ret = -ENOENT;
3122         if (path->slots[0] == 0)
3123                 goto out;
3124
3125         path->slots[0]--;
3126         leaf = path->nodes[0];
3127         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3128
3129         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3130                 goto out;
3131
3132         ret = 1;
3133         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3134 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3135         if (item_size < sizeof(*ei)) {
3136                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3137                 goto out;
3138         }
3139 #endif
3140         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3141
3142         if (item_size != sizeof(*ei) +
3143             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3144                 goto out;
3145
3146         if (btrfs_extent_generation(leaf, ei) <=
3147             btrfs_root_last_snapshot(&root->root_item))
3148                 goto out;
3149
3150         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3151         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3152             BTRFS_EXTENT_DATA_REF_KEY)
3153                 goto out;
3154
3155         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3156         if (btrfs_extent_refs(leaf, ei) !=
3157             btrfs_extent_data_ref_count(leaf, ref) ||
3158             btrfs_extent_data_ref_root(leaf, ref) !=
3159             root->root_key.objectid ||
3160             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3161             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3162                 goto out;
3163
3164         ret = 0;
3165 out:
3166         return ret;
3167 }
3168
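/*
 * Combine the committed-tree and delayed-ref checks above; both are retried
 * while check_delayed_ref reports -EAGAIN (contended head mutex).
 */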
3169 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3170                           struct btrfs_root *root,
3171                           u64 objectid, u64 offset, u64 bytenr)
3172 {
3173         struct btrfs_path *path;
3174         int ret;
3175         int ret2;
3176
3177         path = btrfs_alloc_path();
3178         if (!path)
3179                 return -ENOMEM;
3180
3181         do {
3182                 ret = check_committed_ref(trans, root, path, objectid,
3183                                           offset, bytenr);
3184                 if (ret && ret != -ENOENT)
3185                         goto out;
3186
3187                 ret2 = check_delayed_ref(trans, root, path, objectid,
3188                                          offset, bytenr);
3189         } while (ret2 == -EAGAIN);
3190
3191         if (ret2 && ret2 != -ENOENT) {