// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/super.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

static void init_once(void *ptr)
{
	struct erofs_vnode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static int __init erofs_init_inode_cache(void)
{
	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_vnode), 0,
					       SLAB_RECLAIM_ACCOUNT, init_once);
	return erofs_inode_cachep != NULL ? 0 : -ENOMEM;
}

static void erofs_exit_inode_cache(void)
{
	kmem_cache_destroy(erofs_inode_cachep);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	struct erofs_vnode *vi =
		kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);

	if (vi == NULL)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
	return &vi->vfs_inode;
}

static void free_inode(struct inode *inode)
{
	struct erofs_vnode *vi = EROFS_V(inode);

	/* be careful with the RCU symlink path (see ext4_inode_info->i_data)! */
	if (is_inode_fast_symlink(inode))
		kfree(inode->i_link);

	kfree(vi->xattr_shared_xattrs);
	kmem_cache_free(erofs_inode_cachep, vi);
}

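/*
 * Read the on-disk super block at block 0 and copy its fields into the
 * in-memory erofs_sb_info; returns -EIO on a read failure and -EINVAL
 * when the magic number or block size does not match what this kernel
 * supports.
 */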
static int superblock_read(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct buffer_head *bh;
	struct erofs_super_block *layout;
	unsigned int blkszbits;
	int ret;

	bh = sb_bread(sb, 0);
	if (bh == NULL) {
		errln("cannot read erofs superblock");
		return -EIO;
	}

	sbi = EROFS_SB(sb);
	layout = (struct erofs_super_block *)((u8 *)bh->b_data
		 + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
		errln("cannot find valid erofs superblock");
		goto out;
	}

	blkszbits = layout->blkszbits;
	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
	if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
		errln("blksize %u isn't supported on this platform",
		      1 << blkszbits);
		goto out;
	}

	sbi->blocks = le32_to_cpu(layout->blocks);
	sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
#endif
	sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
#ifdef CONFIG_EROFS_FS_ZIP
	/* TODO: clusterbits should be related to inode */
	sbi->clusterbits = blkszbits;

	if (1 << (sbi->clusterbits - PAGE_SHIFT) > Z_EROFS_CLUSTER_MAX_PAGES)
		errln("clusterbits %u is not supported on this kernel",
		      sbi->clusterbits);
#endif

	sbi->root_nid = le16_to_cpu(layout->root_nid);
	sbi->inos = le64_to_cpu(layout->inos);
	sbi->build_time = le64_to_cpu(layout->build_time);
	sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);

	memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
	memcpy(sbi->volume_name, layout->volume_name,
	       sizeof(layout->volume_name));

	ret = 0;
out:
	brelse(bh);
	return ret;
}

#ifdef CONFIG_EROFS_FAULT_INJECTION
char *erofs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
};

static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct erofs_fault_info));
	}
	set_opt(sbi, FAULT_INJECTION);
}

static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	int rate = 0;

	if (args->from && match_int(args, &rate))
		return -EINVAL;

	__erofs_build_fault_attr(sbi, rate);
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return sbi->fault_info.inject_rate;
}

#else
static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
}

static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	infoln("fault_injection options not supported");
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return 0;
}
#endif

static void default_options(struct erofs_sb_info *sbi)
{
	/* set up some FS parameters */
#ifdef CONFIG_EROFS_FS_ZIP
	sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
#endif

#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif

#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}

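/*
 * Mount options understood by parse_options() below: user_xattr/nouser_xattr,
 * acl/noacl and fault_injection=%u; the last one only takes effect when
 * CONFIG_EROFS_FAULT_INJECTION is enabled.
 */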
enum {
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_fault_injection, Opt_err
};

static match_table_t erofs_tokens = {
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_err, NULL}
};

static int parse_options(struct super_block *sb, char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int err;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		args[0].to = args[0].from = NULL;
		token = match_token(p, erofs_tokens, args);

		switch (token) {
#ifdef CONFIG_EROFS_FS_XATTR
		case Opt_user_xattr:
			set_opt(EROFS_SB(sb), XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(EROFS_SB(sb), XATTR_USER);
			break;
#else
		case Opt_user_xattr:
			infoln("user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			infoln("nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(EROFS_SB(sb), POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(EROFS_SB(sb), POSIX_ACL);
			break;
#else
		case Opt_acl:
			infoln("acl options not supported");
			break;
		case Opt_noacl:
			infoln("noacl options not supported");
			break;
#endif
		case Opt_fault_injection:
			err = erofs_build_fault_attr(EROFS_SB(sb), args);
			if (err)
				return err;
			break;
		default:
			errln("Unrecognized mount option \"%s\" or missing value",
			      p);
			return -EINVAL;
		}
	}
	return 0;
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE

static const struct address_space_operations managed_cache_aops;

static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* 0 - busy */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}

static void managed_cache_invalidatepage(struct page *page,
					 unsigned int offset,
					 unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* Check for potential overflow in debug mode */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	if (offset == 0 && stop == PAGE_SIZE)
		while (!managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = managed_cache_releasepage,
	.invalidatepage = managed_cache_invalidatepage,
};

static struct inode *erofs_init_managed_cache(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (unlikely(inode == NULL))
		return ERR_PTR(-ENOMEM);

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM |
			     __GFP_MOVABLE | __GFP_NOFAIL);
	return inode;
}

#endif

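/*
 * erofs_read_super() does the real work of filling in the VFS super block:
 * it fixes the block size to EROFS_BLKSIZ, reads the on-disk super block,
 * applies default and user-supplied mount options, sets up the managed
 * cache (when configured) and finally instantiates the root dentry.
 */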
static int erofs_read_super(struct super_block *sb,
	const char *dev_name, void *data, int silent)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	int err = -EINVAL;

	infoln("read_super, device -> %s", dev_name);
	infoln("options -> %s", (char *)data);

	if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
		errln("failed to set erofs blksize");
		goto err;
	}

	sbi = kzalloc(sizeof(struct erofs_sb_info), GFP_KERNEL);
	if (unlikely(sbi == NULL)) {
		err = -ENOMEM;
		goto err;
	}
	sb->s_fs_info = sbi;

	err = superblock_read(sb);
	if (err)
		goto err_sbread;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;

#ifdef CONFIG_EROFS_FS_XATTR
	sb->s_xattr = erofs_xattr_handlers;
#endif

	/* set erofs default mount options */
	default_options(sbi);

	err = parse_options(sb, data);
	if (err)
		goto err_parseopt;

	if (!silent)
		infoln("root inode @ nid %llu", ROOT_NID(sbi));

	if (test_opt(sbi, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
#endif

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	sbi->managed_cache = erofs_init_managed_cache(sb);
	if (IS_ERR(sbi->managed_cache)) {
		err = PTR_ERR(sbi->managed_cache);
		goto err_init_managed_cache;
	}
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto err_iget;
	}

	if (!S_ISDIR(inode->i_mode)) {
		errln("rootino(nid %llu) is not a directory(i_mode %o)",
		      ROOT_NID(sbi), inode->i_mode);
		err = -EINVAL;
		iput(inode);
		goto err_iget;
	}

	sb->s_root = d_make_root(inode);
	if (sb->s_root == NULL) {
		err = -ENOMEM;
		goto err_iget;
	}

	/* save the device name to sbi */
	sbi->dev_name = __getname();
	if (sbi->dev_name == NULL) {
		err = -ENOMEM;
		goto err_devname;
	}

	snprintf(sbi->dev_name, PATH_MAX, "%s", dev_name);
	sbi->dev_name[PATH_MAX - 1] = '\0';

	erofs_register_super(sb);

	if (!silent)
		infoln("mounted on %s with opts: %s.", dev_name,
		       (char *)data);
	return 0;
	/*
	 * please add a label for each exit point and use
	 * the following naming convention, thus new features
	 * can be integrated easily without renaming labels.
	 */
err_devname:
	dput(sb->s_root);
	sb->s_root = NULL;
err_iget:
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	iput(sbi->managed_cache);
err_init_managed_cache:
#endif
err_parseopt:
err_sbread:
	sb->s_fs_info = NULL;
	kfree(sbi);
err:
	return err;
}

/*
 * This may be triggered after deactivate_locked_super() is called,
 * which covers both normal umount and a mount that failed to initialize.
 */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	/* for cases where "read_super" itself failed */
	if (sbi == NULL)
		return;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

	infoln("unmounted for %s", sbi->dev_name);
	__putname(sbi->dev_name);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	iput(sbi->managed_cache);
#endif

	mutex_lock(&sbi->umount_mutex);

#ifdef CONFIG_EROFS_FS_ZIP
	/* clean up the compression space of this sb */
	erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
#endif

	erofs_unregister_super(sb);
	mutex_unlock(&sbi->umount_mutex);

	kfree(sbi);
	sb->s_fs_info = NULL;
}

struct erofs_mount_private {
	const char *dev_name;
	char *options;
};

/* support mount_bdev() with options */
static int erofs_fill_super(struct super_block *sb,
			    void *_priv, int silent)
{
	struct erofs_mount_private *priv = _priv;

	return erofs_read_super(sb, priv->dev_name,
				priv->options, silent);
}

static struct dentry *erofs_mount(
	struct file_system_type *fs_type, int flags,
	const char *dev_name, void *data)
{
	struct erofs_mount_private priv = {
		.dev_name = dev_name,
		.options = data
	};

	return mount_bdev(fs_type, flags, dev_name,
			  &priv, erofs_fill_super);
}

static void erofs_kill_sb(struct super_block *sb)
{
	kill_block_super(sb);
}

static struct file_system_type erofs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "erofs",
	.mount		= erofs_mount,
	.kill_sb	= erofs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");

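/*
 * Module bring-up order: inode cache, shrinker, the zip (decompression)
 * subsystem and finally filesystem registration; erofs_module_exit() and
 * the error path below unwind the same steps in reverse.
 */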
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();
	infoln("initializing erofs " EROFS_VERSION);

	err = erofs_init_inode_cache();
	if (err)
		goto icache_err;

	err = register_shrinker(&erofs_shrinker_info);
	if (err)
		goto shrinker_err;

	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	infoln("successfully initialized erofs");
	return 0;

fs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	unregister_shrinker(&erofs_shrinker_info);
shrinker_err:
	erofs_exit_inode_cache();
icache_err:
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);
	z_erofs_exit_zip_subsystem();
	unregister_shrinker(&erofs_shrinker_info);
	erofs_exit_inode_cache();
	infoln("successfully finalized erofs");
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = EROFS_BLKSIZ;
	buf->f_blocks = sbi->blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
			   erofs_get_fault_rate(sbi));
	return 0;
}

static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int org_mnt_opt = sbi->mount_opt;
	unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
	int err;

	DBG_BUGON(!sb_rdonly(sb));
	err = parse_options(sb, data);
	if (err)
		goto out;

	if (test_opt(sbi, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

	*flags |= SB_RDONLY;
	return 0;
out:
	__erofs_build_fault_attr(sbi, org_inject_rate);
	sbi->mount_opt = org_mnt_opt;
	return err;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = alloc_inode,
	.free_inode = free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
	.remount_fs = erofs_remount,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Yu Chao, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");