Diffstat (limited to 'fs/f2fs/file.c')
 fs/f2fs/file.c | 105 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 72 insertions(+), 33 deletions(-)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 093039dee992..e50363583f01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -159,7 +159,7 @@ out_sem:
sb_end_pagefault(inode->i_sb);
err:
- return block_page_mkwrite_return(err);
+ return vmf_fs_error(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
@@ -526,7 +526,11 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
set_inode_flag(inode, FI_MMAP_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+
return 0;
}
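
Note on the hunk above: taking i_sem for read while setting FI_MMAP_FILE pairs with the compress-option ioctl further down, which now takes the same semaphore for write before it checks the flag. A minimal single-threaded sketch of that pairing, using a pthread rwlock and a plain boolean as stand-ins for f2fs's i_sem and inode flag (none of this is the kernel API):

/* Illustrative stand-ins only: pthread rwlock for F2FS_I(inode)->i_sem,
 * a bool for FI_MMAP_FILE.  mmap sets the flag under the read lock; the
 * ioctl checks it under the write lock, so the two cannot interleave. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t i_sem = PTHREAD_RWLOCK_INITIALIZER;
static bool mmap_file;	/* stand-in for FI_MMAP_FILE */

static void fake_mmap(void)
{
	pthread_rwlock_rdlock(&i_sem);
	mmap_file = true;
	pthread_rwlock_unlock(&i_sem);
}

static int fake_set_compress_option(void)
{
	int ret = 0;

	pthread_rwlock_wrlock(&i_sem);
	if (mmap_file)
		ret = -EBUSY;	/* mirrors the ioctl's -EBUSY path */
	pthread_rwlock_unlock(&i_sem);
	return ret;
}

int main(void)
{
	fake_mmap();
	printf("ioctl after mmap: %d\n", fake_set_compress_option());
	return 0;
}
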
@@ -794,7 +798,7 @@ int f2fs_truncate(struct inode *inode)
if (err)
return err;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
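
The timestamp hunks throughout this patch follow one idiom: inode_set_ctime_current() stores the current time in the inode's ctime and returns it, and that value is fed to inode_set_mtime_to_ts() so mtime and ctime end up identical. A rough stand-alone illustration with a fake inode struct and fake helpers (the real accessors live in include/linux/fs.h):

/* Simplified stand-ins for the VFS timestamp accessors; the point of
 * the idiom is that the ctime setter returns the value it stored, so
 * mtime can be set to exactly the same timestamp in one expression. */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

struct fake_inode {
	struct timespec ctime;
	struct timespec mtime;
};

static struct timespec fake_set_ctime_current(struct fake_inode *inode)
{
	clock_gettime(CLOCK_REALTIME, &inode->ctime);
	return inode->ctime;
}

static void fake_set_mtime_to_ts(struct fake_inode *inode, struct timespec ts)
{
	inode->mtime = ts;
}

int main(void)
{
	struct fake_inode ino = { { 0, 0 }, { 0, 0 } };

	/* Mirrors: inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); */
	fake_set_mtime_to_ts(&ino, fake_set_ctime_current(&ino));
	printf("ctime=%lld mtime=%lld\n",
	       (long long)ino.ctime.tv_sec, (long long)ino.mtime.tv_sec);
	return 0;
}
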
@@ -882,7 +886,7 @@ int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
/* we need to show initial sectors used for inline_data/dentries */
if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
@@ -901,11 +905,11 @@ static void __setattr_copy(struct mnt_idmap *idmap,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
@@ -1008,7 +1012,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
spin_lock(&F2FS_I(inode)->i_size_lock);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
F2FS_I(inode)->last_disk_size = i_size_read(inode);
spin_unlock(&F2FS_I(inode)->i_size_lock);
}
@@ -1724,6 +1728,7 @@ next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err && err != -ENODATA)
goto out_err;
@@ -1835,7 +1840,7 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1919,12 +1924,19 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
int err = f2fs_convert_inline_inode(inode);
if (err)
return err;
- if (!f2fs_may_compress(inode))
- return -EINVAL;
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ if (!f2fs_may_compress(inode) ||
+ (S_ISREG(inode->i_mode) &&
+ F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return -EINVAL;
- if (set_compress_context(inode))
- return -EOPNOTSUPP;
+ }
+ err = set_compress_context(inode);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
+
+ if (err)
+ return err;
}
}
@@ -1937,7 +1949,7 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
else
clear_inode_flag(inode, FI_PROJ_INHERIT);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_set_inode_flags(inode);
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
@@ -2465,6 +2477,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
gc_control.init_gc_type = sync ? FG_GC : BG_GC;
gc_control.err_gc_skipped = sync;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
out:
mnt_drop_write_file(filp);
@@ -2508,6 +2521,7 @@ do_more:
}
gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret) {
if (ret == -EBUSY)
@@ -2874,10 +2888,10 @@ out_src:
if (ret)
goto out_unlock;
- src->i_mtime = src->i_ctime = current_time(src);
+ inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
f2fs_mark_inode_dirty_sync(src, false);
if (src != dst) {
- dst->i_mtime = dst->i_ctime = current_time(dst);
+ inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
f2fs_mark_inode_dirty_sync(dst, false);
}
f2fs_update_time(sbi, REQ_TIME);
@@ -2990,6 +3004,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
gc_control.victim_segno = start_segno;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret == -EAGAIN)
ret = 0;
@@ -3073,7 +3088,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
goto out_unlock;
fi->i_projid = kprojid;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
f2fs_unlock_op(sbi);
@@ -3243,11 +3258,12 @@ int f2fs_precache_extents(struct inode *inode)
return -EOPNOTSUPP;
map.m_lblk = 0;
+ map.m_pblk = 0;
map.m_next_pgofs = NULL;
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- end = max_file_blocks(inode);
+ end = F2FS_BLK_ALIGN(i_size_read(inode));
while (map.m_lblk < end) {
map.m_len = end - map.m_lblk;
@@ -3255,7 +3271,7 @@ int f2fs_precache_extents(struct inode *inode)
f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
- if (err)
+ if (err || !map.m_len)
return err;
map.m_lblk = m_next_extent;
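
The two changes above bound the precache walk by the block-aligned file size instead of the theoretical maximum file size, and stop it as soon as a mapping round comes back empty. A hedged sketch of that termination logic, where map_one_extent() and the fake_map struct are invented for illustration and only mimic the m_lblk/m_len/m_next_extent convention:

/* Loop-termination sketch only; the mapping callback and its data are
 * made up, but the "stop on error or on an empty round" rule matches
 * the patched f2fs_precache_extents(). */
#include <stdio.h>

struct fake_map {
	unsigned long lblk;		/* next logical block to map */
	unsigned long len;		/* blocks mapped this round */
	unsigned long next_extent;	/* start of the following extent */
};

/* Pretend extents of 8 blocks exist up to block 64, nothing after. */
static int map_one_extent(struct fake_map *map)
{
	if (map->lblk >= 64) {
		map->len = 0;		/* empty round: caller must stop */
		return 0;
	}
	map->len = 8;
	map->next_extent = map->lblk + 8;
	return 0;
}

int main(void)
{
	struct fake_map map = { .lblk = 0 };
	unsigned long end = 100;	/* stand-in for F2FS_BLK_ALIGN(i_size) */

	while (map.lblk < end) {
		int err = map_one_extent(&map);

		if (err || !map.len)	/* the patch's new exit condition */
			break;
		map.lblk = map.next_extent;
	}
	printf("walk stopped at lblk=%lu\n", map.lblk);
	return 0;
}
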
@@ -3511,7 +3527,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
}
set_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -3710,7 +3726,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (ret >= 0) {
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
unlock_inode:
@@ -3976,6 +3992,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
file_start_write(filp);
inode_lock(inode);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
ret = -EBUSY;
goto out;
@@ -3989,12 +4006,22 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
F2FS_I(inode)->i_compress_algorithm = option.algorithm;
F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+ /* Set default level */
+ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
+ F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+ else
+ F2FS_I(inode)->i_compress_level = 0;
+ /* Adjust mount option level */
+ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
+ F2FS_OPTION(sbi).compress_level)
+ F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
f2fs_mark_inode_dirty_sync(inode, true);
if (!f2fs_is_compress_backend_ready(inode))
f2fs_warn(sbi, "compression algorithm is successfully set, "
"but current kernel doesn't support this algorithm.");
out:
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
inode_unlock(inode);
file_end_write(filp);
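
The added lines in this hunk pick a per-file compression level whenever the algorithm is changed: ZSTD falls back to F2FS_ZSTD_DEFAULT_CLEVEL, other algorithms to 0, and a non-zero mount-time level for the same algorithm overrides the default. A compact sketch of just that decision; the enum, the structs and the default value 3 are assumptions made for illustration, not the f2fs definitions:

/* Level-selection logic only; FAKE_ZSTD_DEFAULT_CLEVEL and the option
 * structs are illustrative stand-ins for the f2fs definitions. */
#include <stdio.h>

enum { ALG_LZO, ALG_LZ4, ALG_ZSTD, ALG_LZORLE };
#define FAKE_ZSTD_DEFAULT_CLEVEL 3	/* assumed default, for the demo */

struct fake_mount_opt { int algorithm; int level; };

static int pick_level(int new_algorithm, const struct fake_mount_opt *mnt)
{
	/* Set default level. */
	int level = (new_algorithm == ALG_ZSTD) ? FAKE_ZSTD_DEFAULT_CLEVEL : 0;

	/* Adjust to the mount option level when it targets the same
	 * algorithm and was explicitly set to a non-zero value. */
	if (new_algorithm == mnt->algorithm && mnt->level)
		level = mnt->level;
	return level;
}

int main(void)
{
	struct fake_mount_opt mnt = { .algorithm = ALG_ZSTD, .level = 6 };

	printf("zstd -> %d\n", pick_level(ALG_ZSTD, &mnt));	/* 6 */
	printf("lz4  -> %d\n", pick_level(ALG_LZ4, &mnt));	/* 0 */
	return 0;
}
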
@@ -4079,10 +4106,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4092,8 +4117,14 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
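
This loop (and the identical one in f2fs_ioc_compress_file below) now advances only in whole compression clusters and, between clusters, reschedules and honors fatal signals, so a huge file cannot monopolize the ioctl caller. A stand-alone sketch of the loop shape, where do_cluster() and the cancelled flag stand in for redirty_blocks() and cond_resched()/fatal_signal_pending():

/* Loop-shape illustration only; the helpers are invented stand-ins for
 * the kernel calls named above. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool cancelled;	/* would be set asynchronously by a fatal signal */

static int do_cluster(unsigned long page_idx, unsigned int len)
{
	(void)page_idx;
	(void)len;
	return 0;	/* pretend the whole cluster was redirtied */
}

int main(void)
{
	const unsigned int cluster_size = 4;	/* pages per cluster */
	unsigned long page_idx = 0;
	unsigned long count = 10;		/* not cluster aligned on purpose */
	int ret = 0;

	/* Only whole clusters are processed; a trailing partial cluster
	 * is skipped, exactly like the patched loops. */
	while (count && count >= cluster_size) {
		ret = do_cluster(page_idx, cluster_size);
		if (ret < 0)
			break;

		count -= cluster_size;
		page_idx += cluster_size;

		/* Placeholder for cond_resched() + fatal_signal_pending(). */
		if (cancelled) {
			ret = -EINTR;
			break;
		}
	}
	printf("stopped at page %lu, ret=%d\n", page_idx, ret);
	return 0;
}
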
@@ -4153,10 +4184,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4166,8 +4195,14 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4579,6 +4614,7 @@ static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
dec_page_count(sbi, F2FS_DIO_WRITE);
if (error)
return error;
+ f2fs_update_time(sbi, REQ_TIME);
f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
return 0;
}
@@ -4823,6 +4859,9 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
filp->f_mode &= ~FMODE_RANDOM;
spin_unlock(&filp->f_lock);
return 0;
+ } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
+ /* Load extent cache at the first readahead. */
+ f2fs_precache_extents(inode);
}
err = generic_fadvise(filp, offset, len, advice);
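
With this last hunk, the first POSIX_FADV_WILLNEED hint at offset 0 also loads the file's extent cache before generic readahead runs. From userspace the trigger is an ordinary posix_fadvise() call; in the sketch below only the file path is made up:

/* Ordinary userspace hint; only the path is invented. */
#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/bigfile", O_RDONLY);
	int ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* offset 0, len 0 (whole file): on a kernel with this patch, f2fs
	 * precaches the file's extents before starting readahead. */
	ret = posix_fadvise(fd, 0, 0, POSIX_FADV_WILLNEED);
	if (ret)
		fprintf(stderr, "posix_fadvise: %d\n", ret);
	close(fd);
	return 0;
}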