author     Linus Torvalds <torvalds@linux-foundation.org>  2014-02-10 16:03:16 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-02-10 16:03:16 -0800
commit     6792dfe383dd20ed270da198aa0676bac47245b4 (patch)
tree       380a490de696b7f1f08323b56b149275cc43f2e1 /mm/slub.c
parent     cbf2822a7d44352c5c4c15baf0da3a3dc8495e90 (diff)
parent     0e048316ff577e12c748e2d0a2e4f0f7b006654d (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge misc fixes from Andrew Morton:
 "A bunch of fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2: check existence of old dentry in ocfs2_link()
  ocfs2: update inode size after zeroing the hole
  ocfs2: fix issue that ocfs2_setattr() does not deal with new_i_size==i_size
  mm/memory-failure.c: move refcount only in !MF_COUNT_INCREASED
  smp.h: fix x86+cpu.c sparse warnings about arch nonboot CPU calls
  mm: fix page leak at nfs_symlink()
  slub: do not assert not having lock in removing freed partial
  gitignore: add all.config
  ocfs2: fix ocfs2_sync_file() if filesystem is readonly
  drivers/edac/edac_mc_sysfs.c: poll timeout cannot be zero
  fs/file.c:fdtable: avoid triggering OOMs from alloc_fdmem
  xen: properly account for _PAGE_NUMA during xen pte translations
  mm/slub.c: list_lock may not be held in some circumstances
  drivers/md/bcache/extents.c: use %zi to format size_t
  vmcore: prevent PT_NOTE p_memsz overflow during header update
  drivers/message/i2o/i2o_config.c: fix deadlock in compat_ioctl(I2OGETIOPS)
  Documentation/: update 00-INDEX files
  checkpatch: fix detection of git repository
  get_maintainer: fix detection of git repository
  drivers/misc/sgi-gru/grukdump.c: unlocking should be conditional in gru_dump_context()
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_add(&page->lru, &n->full);
}
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_del(&page->lru);
}
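The reordering above is the point of the fix: lockdep_assert_held() fires whenever the lock is not held at the assert, even on a path that returns before ever touching the list. Checking SLAB_STORE_USER first confines the assertion to the path that actually modifies n->full. A minimal sketch of the pattern, assuming hypothetical types and a hypothetical flag (my_cache, my_node, MY_STORE_USER, debug_track_add are illustrative, not kernel identifiers):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_cache { unsigned long flags; };	/* hypothetical */
struct my_node {				/* hypothetical */
	spinlock_t list_lock;
	struct list_head full;
	struct list_head partial;
	unsigned long nr_partial;
};
#define MY_STORE_USER 0x1			/* hypothetical debug flag */

/* Guard clause first; assert the lock only on the path that
 * actually touches the shared list. */
static void debug_track_add(struct my_cache *c, struct my_node *n,
			    struct list_head *item)
{
	if (!(c->flags & MY_STORE_USER))
		return;				/* no list access, lock irrelevant */
	lockdep_assert_held(&n->list_lock);	/* the list is touched from here on */
	list_add(item, &n->full);
}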
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
*/
-static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
- lockdep_assert_held(&n->list_lock);
-
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
list_add(&page->lru, &n->partial);
}
-static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+ struct page *page, int tail)
{
lockdep_assert_held(&n->list_lock);
+ __add_partial(n, page, tail);
+}
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
list_del(&page->lru);
n->nr_partial--;
}
+static inline void remove_partial(struct kmem_cache_node *n,
+ struct page *page)
+{
+ lockdep_assert_held(&n->list_lock);
+ __remove_partial(n, page);
+}
+
/*
* Remove slab from the partial list, freeze it and
* return the pointer to the freelist.
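The hunk above splits each partial-list operation into a locked/unlocked pair: the double-underscore helper does the list manipulation with no locking assumption, and the plain-named wrapper asserts list_lock before delegating, so regular callers keep full lockdep coverage. A hedged sketch of the idiom, reusing the hypothetical my_node from the previous sketch (counted_list_add and friends are made-up names):

static inline void __counted_list_add(struct my_node *n, struct list_head *item)
{
	/* no locking contract: the caller guarantees exclusion by other means */
	list_add(item, &n->partial);
	n->nr_partial++;
}

static inline void counted_list_add(struct my_node *n, struct list_head *item)
{
	lockdep_assert_held(&n->list_lock);	/* concurrent callers come in here */
	__counted_list_add(n, item);
}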
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
inc_slabs_node(kmem_cache_node, node, page->objects);
/*
- * the lock is for lockdep's sake, not for any actual
- * race protection
+ * No locks need to be taken here as it has just been
+ * initialized and there is no concurrent access.
*/
- spin_lock(&n->list_lock);
- add_partial(n, page, DEACTIVATE_TO_HEAD);
- spin_unlock(&n->list_lock);
+ __add_partial(n, page, DEACTIVATE_TO_HEAD);
}
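Before this change the init path took list_lock purely so the assertion in add_partial() would not trip; the lock protected nothing, because the node had just been allocated and no other CPU could reach it. Calling the unlocked helper makes that explicit. A sketch of such a single-threaded setup path, again with the hypothetical names used above (early_node_setup is illustrative):

static void early_node_setup(struct my_node *n, struct list_head *first)
{
	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->full);
	INIT_LIST_HEAD(&n->partial);
	n->nr_partial = 0;
	/* n is not yet published: no concurrency, so no lock and no assert */
	__counted_list_add(n, first);
}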
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- remove_partial(n, page);
+ __remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,