Diffstat (limited to 'fs/f2fs/super.c')
-rw-r--r--  fs/f2fs/super.c  101
1 file changed, 73 insertions(+), 28 deletions(-)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 87ab5696bd48..fc7d463dee15 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -150,6 +150,8 @@ enum {
Opt_mode,
Opt_fault_injection,
Opt_fault_type,
+ Opt_lazytime,
+ Opt_nolazytime,
Opt_quota,
Opt_noquota,
Opt_usrquota,
@@ -226,6 +228,8 @@ static match_table_t f2fs_tokens = {
{Opt_mode, "mode=%s"},
{Opt_fault_injection, "fault_injection=%u"},
{Opt_fault_type, "fault_type=%u"},
+ {Opt_lazytime, "lazytime"},
+ {Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
{Opt_noquota, "noquota"},
{Opt_usrquota, "usrquota"},
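
The two new strings are resolved to Opt_lazytime/Opt_nolazytime by the same match_token() pass that handles every other entry in f2fs_tokens. A minimal sketch of that step, assuming only <linux/parser.h> and the table above rather than the full parse_options() loop:

#include <linux/parser.h>

/* Resolve one comma-separated mount option to its Opt_* token.
 * match_token() walks f2fs_tokens until a pattern matches and fills
 * args[] for patterns carrying %s/%u specifiers. */
static int resolve_f2fs_token(char *p)
{
        substring_t args[MAX_OPT_ARGS];

        return match_token(p, f2fs_tokens, args);
}
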
@@ -834,6 +838,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
set_opt(sbi, READ_EXTENT_CACHE);
break;
case Opt_noextent_cache:
+ if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_DEVICE_ALIAS)) {
+ f2fs_err(sbi, "device aliasing requires extent cache");
+ return -EINVAL;
+ }
clear_opt(sbi, READ_EXTENT_CACHE);
break;
case Opt_noinline_data:
@@ -918,6 +926,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
f2fs_info(sbi, "fault_type options not supported");
break;
#endif
+ case Opt_lazytime:
+ sb->s_flags |= SB_LAZYTIME;
+ break;
+ case Opt_nolazytime:
+ sb->s_flags &= ~SB_LAZYTIME;
+ break;
#ifdef CONFIG_QUOTA
case Opt_quota:
case Opt_usrquota:
@@ -1158,7 +1172,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
break;
}
- strcpy(ext[ext_cnt], name);
+ ret = strscpy(ext[ext_cnt], name);
+ if (ret < 0) {
+ kfree(name);
+ return ret;
+ }
F2FS_OPTION(sbi).compress_ext_cnt++;
kfree(name);
break;
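
strscpy() bounds the copy to the destination and reports truncation, which is why this hunk (and the matching one for nocompress extensions below) frees name and returns on error instead of silently overflowing the way strcpy() could. A small sketch of the semantics, assuming F2FS_EXTENSION_LEN from f2fs.h; the diff itself uses the two-argument form, which infers the size from the destination array type:

#include <linux/string.h>
#include <linux/types.h>

/* Copy a compression extension, refusing names that do not fit. */
static int copy_extension(char *dst, const char *name)
{
        ssize_t ret = strscpy(dst, name, F2FS_EXTENSION_LEN);

        if (ret < 0)            /* -E2BIG: name too long, dst truncated */
                return ret;
        return 0;               /* ret >= 0: number of characters copied */
}
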
@@ -1187,7 +1205,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
break;
}
- strcpy(noext[noext_cnt], name);
+ ret = strscpy(noext[noext_cnt], name);
+ if (ret < 0) {
+ kfree(name);
+ return ret;
+ }
F2FS_OPTION(sbi).nocompress_ext_cnt++;
kfree(name);
break;
@@ -1738,6 +1760,18 @@ static int f2fs_freeze(struct super_block *sb)
static int f2fs_unfreeze(struct super_block *sb)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ /*
+ * Creating a snapshot on a mounted lvm device sets its
+ * discard_max_bytes to zero, so drop all remaining discards
+ * here.
+ * There is no need to disable real-time discard, because
+ * discard_max_bytes recovers once the snapshot is removed.
+ */
+ if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
+ f2fs_issue_discard_timeout(sbi);
+
clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
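
The new check pairs the DISCARD mount option with a runtime capability probe. Roughly the kind of test behind f2fs_hw_support_discard() — the real helper lives in f2fs.h and also covers multi-device setups, so this single-device version is only an illustrative assumption:

#include <linux/blkdev.h>
#include <linux/fs.h>

/* False when the queue currently advertises no discard capacity, e.g. an
 * lvm volume whose discard_max_bytes dropped to zero after a snapshot
 * was taken (the situation the comment above describes). */
static bool hw_supports_discard(struct super_block *sb)
{
        return bdev_max_discard_sectors(sb->s_bdev) != 0;
}
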
@@ -2474,6 +2508,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
}
}
+ adjust_unusable_cap_perc(sbi);
if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
if (test_opt(sbi, DISABLE_CHECKPOINT)) {
err = f2fs_disable_checkpoint(sbi);
@@ -2518,7 +2553,6 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
- adjust_unusable_cap_perc(sbi);
*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
restore_checkpoint:
@@ -3322,7 +3356,7 @@ loff_t max_file_blocks(struct inode *inode)
* fit within U32_MAX + 1 data units.
*/
- result = min(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
+ result = umin(result, F2FS_BYTES_TO_BLK(((loff_t)U32_MAX + 1) * 4096));
return result;
}
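
umin() from <linux/minmax.h> compares its operands as unsigned values, sidestepping min()'s strict type matching when signed and unsigned quantities that are known to be non-negative get mixed. An illustration under that assumption, not taken from the f2fs code:

#include <linux/minmax.h>
#include <linux/types.h>

/* Clamp a signed-but-non-negative count to an unsigned limit; plain min()
 * would object to the signedness mismatch between loff_t and u64. */
static u64 clamp_nonnegative(loff_t value, u64 limit)
{
        return umin(value, limit);
}
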
@@ -4155,8 +4189,7 @@ static bool system_going_down(void)
|| system_state == SYSTEM_RESTART;
}
-void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
- bool irq_context)
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason)
{
struct super_block *sb = sbi->sb;
bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
@@ -4168,10 +4201,12 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
if (!f2fs_hw_is_readonly(sbi)) {
save_stop_reason(sbi, reason);
- if (irq_context && !shutdown)
- schedule_work(&sbi->s_error_work);
- else
- f2fs_record_stop_reason(sbi);
+ /*
+ * Always schedule an asynchronous task to record stop_reason,
+ * to avoid a potential deadlock when calling
+ * f2fs_record_stop_reason() synchronously.
+ */
+ schedule_work(&sbi->s_error_work);
}
/*
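
With the irq_context parameter gone, the error path always defers to the s_error_work worker rather than calling f2fs_record_stop_reason() inline. A minimal sketch of that deferral pattern, assuming the standard workqueue API; the real worker is f2fs_record_error_work() shown in the next hunk:

#include <linux/container_of.h>
#include <linux/workqueue.h>

/* Worker: runs later in process context, where the superblock update in
 * f2fs_record_stop_reason() can proceed without the deadlock risk noted
 * in the comment above. */
static void record_error_worker(struct work_struct *work)
{
        struct f2fs_sb_info *sbi =
                container_of(work, struct f2fs_sb_info, s_error_work);

        f2fs_record_stop_reason(sbi);
}

/* Caller side: queueing the work item is cheap and safe from any context,
 * which is why the irq_context special case is no longer needed. */
static void report_error(struct f2fs_sb_info *sbi)
{
        schedule_work(&sbi->s_error_work);
}
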
@@ -4217,6 +4252,16 @@ static void f2fs_record_error_work(struct work_struct *work)
f2fs_record_stop_reason(sbi);
}
+static inline unsigned int get_first_zoned_segno(struct f2fs_sb_info *sbi)
+{
+ int devi;
+
+ for (devi = 0; devi < sbi->s_ndevs; devi++)
+ if (bdev_is_zoned(FDEV(devi).bdev))
+ return GET_SEGNO(sbi, FDEV(devi).start_blk);
+ return 0;
+}
+
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
@@ -4617,6 +4662,9 @@ try_onemore:
/* For write statistics */
sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
+ /* get segno of first zoned block device */
+ sbi->first_zoned_segno = get_first_zoned_segno(sbi);
+
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
if (__exist_node_summaries(sbi))
@@ -4738,26 +4786,23 @@ try_onemore:
reset_checkpoint:
/*
* If the f2fs is not readonly and fsync data recovery succeeds,
- * check zoned block devices' write pointer consistency.
+ * write pointer consistency of cursegs and other zones is already
+ * checked and fixed during recovery. However, if recovery fails,
+ * write pointers are left untouched, and retry-mount should check
+ * them here.
*/
- if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sb)) {
- int err2;
-
- f2fs_notice(sbi, "Checking entire write pointers");
- err2 = f2fs_check_write_pointer(sbi);
- if (err2)
- err = err2;
- }
+ if (skip_recovery)
+ err = f2fs_check_and_fix_write_pointer(sbi);
if (err)
goto free_meta;
+ /* f2fs_recover_fsync_data() cleared this already */
+ clear_sbi_flag(sbi, SBI_POR_DOING);
+
err = f2fs_init_inmem_curseg(sbi);
if (err)
goto sync_free_meta;
- /* f2fs_recover_fsync_data() cleared this already */
- clear_sbi_flag(sbi, SBI_POR_DOING);
-
if (test_opt(sbi, DISABLE_CHECKPOINT)) {
err = f2fs_disable_checkpoint(sbi);
if (err)
@@ -4991,9 +5036,6 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_shrinker();
if (err)
goto free_sysfs;
- err = register_filesystem(&f2fs_fs_type);
- if (err)
- goto free_shrinker;
f2fs_create_root_stats();
err = f2fs_init_post_read_processing();
if (err)
@@ -5016,7 +5058,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_create_casefold_cache();
if (err)
goto free_compress_cache;
+ err = register_filesystem(&f2fs_fs_type);
+ if (err)
+ goto free_casefold_cache;
return 0;
+free_casefold_cache:
+ f2fs_destroy_casefold_cache();
free_compress_cache:
f2fs_destroy_compress_cache();
free_compress_mempool:
@@ -5031,8 +5078,6 @@ free_post_read:
f2fs_destroy_post_read_processing();
free_root_stats:
f2fs_destroy_root_stats();
- unregister_filesystem(&f2fs_fs_type);
-free_shrinker:
f2fs_exit_shrinker();
free_sysfs:
f2fs_exit_sysfs();
@@ -5056,6 +5101,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ unregister_filesystem(&f2fs_fs_type);
f2fs_destroy_casefold_cache();
f2fs_destroy_compress_cache();
f2fs_destroy_compress_mempool();
@@ -5064,7 +5110,6 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_iostat_processing();
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
- unregister_filesystem(&f2fs_fs_type);
f2fs_exit_shrinker();
f2fs_exit_sysfs();
f2fs_destroy_garbage_collection_cache();