Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c       15
-rw-r--r--   mm/huge_memory.c    1
-rw-r--r--   mm/migrate.c        1
-rw-r--r--   mm/readahead.c      2
-rw-r--r--   mm/slub.c          43
5 files changed, 52 insertions, 10 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index ac3775c1ce4c..ffdfbc8b0e3c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			continue;
 		if (xas.xa_index > max || xa_is_value(folio))
 			break;
+		if (xa_is_sibling(folio))
+			break;
 		if (!folio_try_get_rcu(folio))
 			goto retry;
@@ -2629,6 +2631,13 @@ err:
 	return err;
 }
 
+static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
+{
+	unsigned int shift = folio_shift(folio);
+
+	return (pos1 >> shift == pos2 >> shift);
+}
+
 /**
  * filemap_read - Read data from the page cache.
  * @iocb: The iocb to read.
@@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		writably_mapped = mapping_writably_mapped(mapping);
 
 		/*
-		 * When a sequential read accesses a page several times, only
+		 * When a read accesses the same folio several times, only
 		 * mark it as accessed the first time.
 		 */
-		if (iocb->ki_pos >> PAGE_SHIFT !=
-				ra->prev_pos >> PAGE_SHIFT)
+		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
+				    fbatch.folios[0]))
 			folio_mark_accessed(fbatch.folios[0]);
 
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
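
Aside: the new pos_same_folio() helper only checks that both positions agree in every bit above the folio size, and the "ra->prev_pos - 1" argument presumably maps prev_pos, which points just past the previously read data, back onto the folio that was actually touched. A minimal userspace sketch of the comparison, with folio_shift() replaced by an explicit shift argument since struct folio is kernel-internal:

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace sketch of pos_same_folio(): two file positions land in the
 * same folio iff they agree in every bit above the folio size.  The
 * folio_shift(folio) call from the patch is modelled as an explicit
 * shift parameter (PAGE_SHIFT plus the folio order).
 */
static int pos_same_folio(int64_t pos1, int64_t pos2, unsigned int shift)
{
	return (pos1 >> shift) == (pos2 >> shift);
}

int main(void)
{
	/* 16KiB folio (order-2 on 4KiB pages): shift = 14 */
	printf("%d\n", pos_same_folio(0, 16383, 14));	/* 1: same folio */
	printf("%d\n", pos_same_folio(0, 16384, 14));	/* 0: next folio */
	return 0;
}
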
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f7248002dad9..834f288b3769 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2377,6 +2377,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			page_tail);
 	page_tail->mapping = head->mapping;
 	page_tail->index = head->index + tail;
+	page_tail->private = 0;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
diff --git a/mm/migrate.c b/mm/migrate.c
index e51588e95f57..6c1ea61f39d8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1106,6 +1106,7 @@ static int unmap_and_move(new_page_t get_new_page,
 	if (!newpage)
 		return -ENOMEM;
+	newpage->private = 0;
 
 	rc = __unmap_and_move(page, newpage, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(newpage, reason);
diff --git a/mm/readahead.c b/mm/readahead.c
index 57a015108254..fdcd28cbd92d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -510,6 +510,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 			new_order--;
 	}
 
+	filemap_invalidate_lock_shared(mapping);
 	while (index <= limit) {
 		unsigned int order = new_order;
@@ -536,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	}
 
 	read_pages(ractl);
+	filemap_invalidate_unlock_shared(mapping);
 
 	/*
 	 * If there were already pages in the page cache, then we may have
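
For context, filemap_invalidate_lock_shared()/filemap_invalidate_unlock_shared() take mapping->invalidate_lock as readers, so the folio allocation loop above cannot run concurrently with paths that invalidate the page cache (such as hole punching), which take that rwsem exclusively. A rough userspace model of the discipline this patch adopts; the names and pthread locking below are illustrative only, not the kernel API:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for mapping->invalidate_lock. */
static pthread_rwlock_t invalidate_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Readahead-style path: inserts folios while holding the lock shared. */
static void ra_insert_folios(void)
{
	pthread_rwlock_rdlock(&invalidate_lock);
	puts("inserting folios into the page cache");
	pthread_rwlock_unlock(&invalidate_lock);
}

/* Hole-punch-style path: holds the lock exclusive, excluding insertions. */
static void invalidate_range(void)
{
	pthread_rwlock_wrlock(&invalidate_lock);
	puts("removing folios; no new insertions can race with this");
	pthread_rwlock_unlock(&invalidate_lock);
}

int main(void)
{
	ra_insert_folios();
	invalidate_range();
	return 0;
}
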
diff --git a/mm/slub.c b/mm/slub.c
index e5535020e0fd..b1281b8654bd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }
 
-static void noinline set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, unsigned long addr)
-{
-	struct track *p = get_track(s, object, alloc);
-
 #ifdef CONFIG_STACKDEPOT
+static noinline depot_stack_handle_t set_track_prepare(void)
+{
+	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+	return handle;
+}
+#else
+static inline depot_stack_handle_t set_track_prepare(void)
+{
+	return 0;
+}
 #endif
 
+static void set_track_update(struct kmem_cache *s, void *object,
+			     enum track_item alloc, unsigned long addr,
+			     depot_stack_handle_t handle)
+{
+	struct track *p = get_track(s, object, alloc);
+
+#ifdef CONFIG_STACKDEPOT
+	p->handle = handle;
+#endif
 	p->addr = addr;
 	p->cpu = smp_processor_id();
 	p->pid = current->pid;
 	p->when = jiffies;
 }
 
+static __always_inline void set_track(struct kmem_cache *s, void *object,
+				      enum track_item alloc, unsigned long addr)
+{
+	depot_stack_handle_t handle = set_track_prepare();
+
+	set_track_update(s, object, alloc, addr, handle);
+}
+
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	struct track *p;
@@ -1373,6 +1396,10 @@ static noinline int free_debug_processing(
 	int cnt = 0;
 	unsigned long flags, flags2;
 	int ret = 0;
+	depot_stack_handle_t handle = 0;
+
+	if (s->flags & SLAB_STORE_USER)
+		handle = set_track_prepare();
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(slab, &flags2);
@@ -1391,7 +1418,7 @@ next_object:
 	}
 
 	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_FREE, addr);
+		set_track_update(s, object, TRACK_FREE, addr, handle);
 	trace(s, slab, object, 0);
 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
@@ -2936,6 +2963,7 @@ redo:
 	if (!freelist) {
 		c->slab = NULL;
+		c->tid = next_tid(c->tid);
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2968,6 +2996,7 @@ deactivate_slab:
 	freelist = c->freelist;
 	c->slab = NULL;
 	c->freelist = NULL;
+	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 	deactivate_slab(s, slab, freelist);
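
On the slub.c side, the visible pattern is that the stack trace is now captured by set_track_prepare() before free_debug_processing() enters its list_lock/slab_lock critical section, and set_track_update() only stores the precomputed handle while the locks are held. A minimal userspace sketch of that prepare-outside-the-lock split; the names mirror the patch, but the mutex and the "expensive step" below are stand-ins, not kernel code:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/*
 * Userspace model of the set_track_prepare()/set_track_update() split:
 * do the expensive work (standing in for stack_trace_save() plus
 * stack_depot_save()) with no locks held, and only store the result
 * inside the critical section.
 */

struct track {
	unsigned long handle;	/* plays the role of depot_stack_handle_t */
	unsigned long when;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct track track_free;

/* Expensive step, done before the lock is taken. */
static unsigned long set_track_prepare(void)
{
	return (unsigned long)time(NULL);	/* pretend: depot-save a stack trace */
}

/* Cheap step, safe to run under list_lock: just stores the handle. */
static void set_track_update(struct track *p, unsigned long handle)
{
	p->handle = handle;
	p->when = (unsigned long)clock();
}

int main(void)
{
	unsigned long handle = set_track_prepare();	/* outside the lock */

	pthread_mutex_lock(&list_lock);
	set_track_update(&track_free, handle);		/* only a store under the lock */
	pthread_mutex_unlock(&list_lock);

	printf("handle=%lu when=%lu\n", track_free.handle, track_free.when);
	return 0;
}
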