aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTony Luck <tony.luck@intel.com>2021-07-25 13:59:56 -0700
committerXiaochen Shen <xiaochen.shen@intel.com>2021-10-23 00:26:24 +0800
commit1cbda083dce71e5bcaeae60233ae5ed7eaa519b5 (patch)
tree9ad3aa72182118d07d66b063a02df7a19d85abab
parent87bd7d845435962e88a5b237882d3f686f4f6e45 (diff)
downloadlinux-1cbda083dce71e5bcaeae60233ae5ed7eaa519b5.tar.gz
mm/pagezero: Add page zero engine register/unregister functions
Add hooks to allow a module to register functions to implement a page zero engine. This includes the infrastructure to allocate all necessary data structures during registration and to clean up when unregistering. There are two hooks that maintain the pre-zeroed pages. The first adds the requested number of pre-zeroed pages to the pcp list; it returns >0 if the driver needs more memory. The second hook passes a freshly allocated large page of the requested order to the driver. Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--include/linux/mm.h11
-rw-r--r--mm/page_alloc.c88
2 files changed, 98 insertions, 1 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 73a52aba448f9..f43ce4663b1d3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3284,5 +3284,16 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
return 0;
}
/*
 * Page clear ("pre-zero") engine interface.
 *
 * A driver implementing a page-zeroing offload engine registers one set
 * of these callbacks via register_page_clear_engine().  Per-node private
 * state is allocated at registration time and torn down at unregister.
 */
struct page_clear_engine_ops {
	/*
	 * Allocate per-node private state for @node.
	 * Returns the private pointer, or NULL on failure.
	 */
	void *(*create)(int node);
	/*
	 * Move up to @want pre-zeroed pages of @migratetype onto list @l,
	 * updating the count at @countp.  A return value > 0 is the order
	 * of a fresh page the driver wants handed to it via ->provide()
	 * (i.e. the engine needs more memory to zero).
	 */
	int (*getpages)(void *private, int migratetype, int want, struct list_head *l, int *countp);
	/* Hand a freshly allocated page of the requested order to the driver. */
	void (*provide)(void *v, struct page *page);
	/*
	 * Release the per-node private state at *@v for @node.
	 * NOTE(review): return value semantics not shown here — callers in
	 * mm/page_alloc.c ignore it on the unregister path; confirm contract.
	 */
	int (*clean)(int node, void **v);
};

int register_page_clear_engine(const struct page_clear_engine_ops *ops);
int unregister_page_clear_engine(const struct page_clear_engine_ops *ops);

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 952b76069eb96..ceb9021ba90e7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -160,6 +160,12 @@ volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif
/*
 * Callbacks installed by register_page_clear_engine().  A non-NULL
 * alloc_zone_private doubles as the "an engine is registered (or a
 * registration is in progress)" flag.
 */
static void *(*alloc_zone_private)(int node);
static int (*get_zero_pages)(void *private, int migratetype, int want,
			struct list_head *l, int *countp);
static void (*provide_page)(void *v, struct page *page);
static int (*engine_cleanup)(int node, void **v);
+
/*
* Array of node states.
*/
@@ -3568,10 +3574,24 @@ struct page *__rmqueue_pcp_zero_list(struct zone *zone, int migratetype,
{
struct list_head *list = &pcp->lists[MIGRATE_PREZEROED];
struct page *page;
+ int order;
do {
if (list_empty(list)) {
- // place holder to allocate pages
+ spin_lock(&zone->lock);
+ order = get_zero_pages(zone->private, migratetype,
+ pcp->batch, list, &pcp->count);
+ if (order > 0) {
+ page = __rmqueue(zone, order, migratetype, alloc_flags);
+ if (page) {
+ if (is_migrate_cma(get_pcppage_migratetype(page)))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
+ -(1 << order));
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
+ provide_page(zone->private, page);
+ }
+ }
+ spin_unlock(&zone->lock);
if (list_empty(list))
return NULL;
}
@@ -9397,4 +9417,70 @@ bool take_page_off_buddy(struct page *page)
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
}
+
+static int walk_all_nodes(int (*fn)(int node, void **p))
+{
+ pg_data_t *node;
+ struct zone *zone;
+ int n, ret = 0;
+
+ for_each_node_state(n, N_MEMORY) {
+ node = NODE_DATA(n);
+
+ zone = &node->node_zones[ZONE_NORMAL];
+ if (atomic_long_read(&zone->managed_pages)) {
+ ret = fn(zone->node, &zone->private);
+ if (ret)
+ goto done;
+ }
+ }
+done:
+ return ret;
+}
+
/*
 * Per-node helper for register_page_clear_engine(): allocate the
 * engine's private state for @node and store it at @p on success.
 * Returns 0 on success, -ENOMEM if the driver's ->create() failed.
 */
static int do_alloc(int node, void **p)
{
	void *priv = alloc_zone_private(node);

	if (!priv)
		return -ENOMEM;

	*p = priv;
	return 0;
}
+
+int register_page_clear_engine(const struct page_clear_engine_ops *engine_ops)
+{
+ int ret;
+
+ if (alloc_zone_private)
+ return -EBUSY;
+
+ alloc_zone_private = engine_ops->create;
+
+ ret = walk_all_nodes(do_alloc);
+ if (ret) {
+ walk_all_nodes(engine_ops->clean);
+ return ret;
+ }
+
+ get_zero_pages = engine_ops->getpages;
+ provide_page = engine_ops->provide;
+ engine_cleanup = engine_ops->clean;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_page_clear_engine);
+
+int unregister_page_clear_engine(const struct page_clear_engine_ops *engine_ops)
+{
+ if (engine_ops->clean != engine_cleanup)
+ return -EINVAL;
+
+ walk_all_nodes(engine_cleanup);
+
+ alloc_zone_private = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_page_clear_engine);
#endif