Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	49
1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index db00a66d8b84..d213ead95675 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -54,7 +54,7 @@
#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
/*
- * Estimate write bandwidth at 200ms intervals.
+ * Estimate write bandwidth or update dirty limit at 200ms intervals.
*/
#define BANDWIDTH_INTERVAL max(HZ/5, 1)
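
For context, BANDWIDTH_INTERVAL is a rate limit rather than a timer period: callers compare elapsed jiffies against it before recomputing. A minimal sketch of that gating pattern, assuming a hypothetical refresh helper (bw_time_stamp is the real timestamp field in struct bdi_writeback upstream):

	/* Sketch: refresh the estimate at most once per BANDWIDTH_INTERVAL. */
	static void maybe_update_bandwidth(struct bdi_writeback *wb)
	{
		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
					   BANDWIDTH_INTERVAL))
			refresh_bandwidth_estimate(wb);	/* hypothetical helper */
	}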
@@ -586,7 +586,7 @@ static void wb_domain_writeout_add(struct wb_domain *dom,
/* First event after period switching was turned off? */
if (unlikely(!dom->period_time)) {
/*
- * We can race with other __bdi_writeout_inc calls here but
+ * We can race with other wb_domain_writeout_add calls here but
* it does not cause any harm since the resulting time when
* timer will fire and what is in writeout_period_time will be
* roughly the same.
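
The arming sequence the comment refers to looks roughly like this upstream (wp_next_time() rounds jiffies up to the next period boundary). Two racing CPUs compute expiries at most a few jiffies apart, and mod_timer() serializes the actual arming:

	if (unlikely(!dom->period_time)) {
		/* Benign race: both racers arm the timer to ~the same time. */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}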
@@ -917,7 +917,9 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
unsigned long thresh)
{
struct wb_domain *dom = dtc_dom(dtc);
+ struct bdi_writeback *wb = dtc->wb;
u64 wb_thresh;
+ u64 wb_max_thresh;
unsigned long numerator, denominator;
unsigned long wb_min_ratio, wb_max_ratio;
@@ -931,11 +933,28 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
wb_thresh *= numerator;
wb_thresh = div64_ul(wb_thresh, denominator);
- wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
+ wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
- if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
- wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
+ wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
+ if (wb_thresh > wb_max_thresh)
+ wb_thresh = wb_max_thresh;
+
+ /*
+ * With strictlimit flag, the wb_thresh is treated as
+ * a hard limit in balance_dirty_pages() and wb_position_ratio().
+ * It's possible that wb_thresh is close to zero, not because
+ * the device is slow, but because it has been inactive.
+ * To prevent occasional writes from being blocked, we raise wb_thresh.
+ */
+ if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
+ unsigned long limit = hard_dirty_limit(dom, dtc->thresh);
+ u64 wb_scale_thresh = 0;
+
+ if (limit > dtc->dirty)
+ wb_scale_thresh = (limit - dtc->dirty) / 100;
+ wb_thresh = max(wb_thresh, min(wb_scale_thresh, wb_max_thresh / 4));
+ }
return wb_thresh;
}
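
Taken together, the rewritten function computes the device's proportional share of the global threshold, clamps it to the per-bdi min/max ratios, and, under strictlimit, floors it so a recently idle device is not throttled to near zero. A simplified standalone model of that arithmetic (plain percentages; the BDI_RATIO_SCALE fixed point and div64_ul() of the real code are elided):

	/* Simplified model of __wb_calc_thresh(): not the kernel code. */
	static unsigned long calc_wb_thresh(unsigned long thresh,
					    unsigned long num, unsigned long den,
					    unsigned long min_pct,
					    unsigned long max_pct,
					    bool strictlimit,
					    unsigned long limit,
					    unsigned long dirty)
	{
		unsigned long wb_thresh = thresh * num / den;	/* share of recent completions */
		unsigned long wb_max_thresh = thresh * max_pct / 100;

		wb_thresh += thresh * min_pct / 100;
		if (wb_thresh > wb_max_thresh)
			wb_thresh = wb_max_thresh;

		if (strictlimit) {
			/*
			 * Floor: 1% of the remaining dirty headroom, capped
			 * at a quarter of the per-wb maximum.
			 */
			unsigned long scale = limit > dirty ?
						(limit - dirty) / 100 : 0;
			unsigned long floor = min(scale, wb_max_thresh / 4);

			if (wb_thresh < floor)
				wb_thresh = floor;
		}
		return wb_thresh;
	}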
@@ -2724,8 +2743,6 @@ EXPORT_SYMBOL(noop_dirty_folio);
/*
* Helper function for set_page_dirty family.
*
- * Caller must hold folio_memcg_lock().
- *
* NOTE: This relies on being atomic wrt interrupts.
*/
static void folio_account_dirtied(struct folio *folio,
@@ -2758,7 +2775,6 @@ static void folio_account_dirtied(struct folio *folio,
/*
* Helper function for deaccounting dirty page without writeback.
*
- * Caller must hold folio_memcg_lock().
*/
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
@@ -2776,9 +2792,8 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
* If warn is true, then emit a warning if the folio is not uptodate and has
* not been truncated.
*
- * The caller must hold folio_memcg_lock(). It is the caller's
- * responsibility to prevent the folio from being truncated while
- * this function is in progress, although it may have been truncated
+ * It is the caller's responsibility to prevent the folio from being truncated
+ * while this function is in progress, although it may have been truncated
* before this function is called. Most callers have the folio locked.
* A few have the folio blocked from truncation through other means (e.g.
* zap_vma_pages() has it mapped and is holding the page table lock).
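
As a concrete illustration of that responsibility, a hypothetical caller (not from this patch) would normally hold the folio lock to keep truncation out while dirtying:

	/* Hypothetical caller: the folio lock prevents truncation here. */
	static bool dirty_one_folio(struct address_space *mapping,
				    struct folio *folio)
	{
		bool dirtied;

		folio_lock(folio);
		dirtied = filemap_dirty_folio(mapping, folio);
		folio_unlock(folio);

		return dirtied;
	}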
@@ -2822,14 +2837,10 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
*/
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
- folio_memcg_lock(folio);
- if (folio_test_set_dirty(folio)) {
- folio_memcg_unlock(folio);
+ if (folio_test_set_dirty(folio))
return false;
- }
__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
- folio_memcg_unlock(folio);
if (mapping->host) {
/* !PageAnon && !swapper_space */
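
With the memcg lock gone (folio_memcg_lock() existed to stabilize the folio's memcg binding against memcg-v1 charge moving, which upstream has deprecated), the function body reduces to a plain test-and-set plus accounting. The post-patch result reads roughly:

	bool filemap_dirty_folio(struct address_space *mapping,
				 struct folio *folio)
	{
		if (folio_test_set_dirty(folio))
			return false;	/* already dirty: nothing to account */

		__folio_mark_dirty(folio, mapping, !folio_test_private(folio));

		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return true;
	}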
@@ -2956,14 +2967,12 @@ void __folio_cancel_dirty(struct folio *folio)
struct bdi_writeback *wb;
struct wb_lock_cookie cookie = {};
- folio_memcg_lock(folio);
wb = unlocked_inode_to_wb_begin(inode, &cookie);
if (folio_test_clear_dirty(folio))
folio_account_cleaned(folio, wb);
unlocked_inode_to_wb_end(inode, &cookie);
- folio_memcg_unlock(folio);
} else {
folio_clear_dirty(folio);
}
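
What survives here is the wb association cookie: an inode can be switched between bdi_writeback structures concurrently, so the stat update stays bracketed by the begin/end pair even without the memcg lock. The remaining shape of the branch, as a sketch:

	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	/* Pin the inode's wb association across the stat update. */
	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	if (folio_test_clear_dirty(folio))
		folio_account_cleaned(folio, wb);
	unlocked_inode_to_wb_end(inode, &cookie);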
@@ -3074,7 +3083,6 @@ bool __folio_end_writeback(struct folio *folio)
struct address_space *mapping = folio_mapping(folio);
bool ret;
- folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -3105,7 +3113,6 @@ bool __folio_end_writeback(struct folio *folio)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
node_stat_mod_folio(folio, NR_WRITTEN, nr);
- folio_memcg_unlock(folio);
return ret;
}
@@ -3118,7 +3125,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
- folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct inode *inode = mapping->host;
@@ -3159,7 +3165,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
- folio_memcg_unlock(folio);
access_ret = arch_make_folio_accessible(folio);
/*