aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Jiang <dave.jiang@intel.com>2020-07-20 17:12:00 -0700
committerXiaochen Shen <xiaochen.shen@intel.com>2021-10-22 23:58:40 +0800
commit2b2a2452424b633e0ee5d2d209a949ee619a4f5c (patch)
tree99e133b6ec0264849c20f0b9f934f345532379a8
parent5760d88b37533007358c39a95e13e8cc262e811c (diff)
downloadlinux-2b2a2452424b633e0ee5d2d209a949ee619a4f5c.tar.gz
dmaengine: idxd: add a test module for kdirect API
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
-rw-r--r--drivers/dma/Kconfig10
-rw-r--r--drivers/dma/idxd/Makefile3
-rw-r--r--drivers/dma/idxd/ktest.c173
3 files changed, 186 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 80c2c03cb0141..3be8f744ce2fc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -335,6 +335,16 @@ config INTEL_IDXD_PERFMON
If unsure, say N.
+config INTEL_IDXD_KTEST
+ tristate "Intel IDXD kernel direct API test"
+ depends on INTEL_IDXD && INTEL_IDXD_SVM && DMATEST
+ help
+ Enable support for IDXD kernel direct API testing.
+
+ Say Y if you want to perform testing of the idxd kernel direct (kdirect) API.
+
+ If unsure, say N.
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64 && !UML
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 51c619edc1435..e761de91328a5 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -10,3 +10,6 @@ idxd_bus-y := bus.o
obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
idxd_compat-y := compat.o
+
+obj-$(CONFIG_INTEL_IDXD_KTEST) += idxd_ktest.o
+idxd_ktest-y := ktest.o
diff --git a/drivers/dma/idxd/ktest.c b/drivers/dma/idxd/ktest.c
new file mode 100644
index 0000000000000..4b81a7892a055
--- /dev/null
+++ b/drivers/dma/idxd/ktest.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/fs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/intel-svm.h>
+#include <linux/freezer.h>
+#include <linux/dmaengine.h>
+#include <uapi/linux/idxd.h>
+#include <linux/idxd.h>
+#include "../dmatest.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IDXD);
+
+/*
+ * Module parameters selecting which device/WQ the dmatest filter will
+ * match. Octal permissions per checkpatch (SYMBOLIC_PERMS): 0644 ==
+ * S_IRUGO | S_IWUSR.
+ */
+static int type = IDXD_TYPE_ANY;
+module_param(type, int, 0644);
+MODULE_PARM_DESC(type,
+		"Device type (-1: any, 0: DSA, 1: IAX)");
+
+static int mode = IDXD_WQ_ANY;
+module_param(mode, int, 0644);
+MODULE_PARM_DESC(mode,
+		"WQ mode (-1: any, 0: dedicated, 1: shared)");
+
+static int node = NUMA_NO_NODE;
+module_param(node, int, 0644);
+MODULE_PARM_DESC(node, "NUMA node for device (-1: any)");
+
+/* Ops table handed to dmatest's kdirect test harness at init. */
+static struct dmatest_kdirect_ops ktest_ops;
+
+/*
+ * Completion callback attached to each submitted descriptor by
+ * submit_memmove_operation(). @arg is the submitting thread's
+ * dmatest_done tracker (embedded in struct dmatest_thread).
+ */
+static void idxd_ktest_callback(void *arg)
+{
+ struct dmatest_done *done = arg;
+ struct dmatest_thread *thread =
+ container_of(done, struct dmatest_thread, test_done);
+ /* Normal path: mark the op done and wake the waiting test thread. */
+ if (!thread->done) {
+ done->done = true;
+ wake_up_all(done->wait);
+ } else {
+ /*
+ * If thread->done, it means that this callback occurred
+ * after the parent thread has cleaned up. This can
+ * happen in the case that driver doesn't implement
+ * the terminate_all() functionality and a dma operation
+ * did not occur within the timeout period
+ */
+ WARN(1, "idxd_ktest: Kernel memory may be corrupted!!\n");
+ }
+}
+
+/*
+ * Build and submit a single DSA memmove descriptor on @chan via the
+ * kdirect API, then wait (up to @timeout jiffies) for its completion
+ * callback.
+ *
+ * @chan:    dmaengine channel backed by an idxd WQ
+ * @thread:  dmatest thread owning the done tracker and wait queue
+ * @src:     source buffer descriptor (aligned base + offset)
+ * @dst:     destination buffer descriptor (aligned base + offset)
+ * @len:     transfer size in bytes
+ * @timeout: wait timeout in jiffies
+ *
+ * Returns 0 on success, negative errno on allocation/submit failure,
+ * timeout, or a non-complete DMA status.
+ */
+static int submit_memmove_operation(struct dma_chan *chan,
+				    struct dmatest_thread *thread,
+				    struct dmatest_data *src,
+				    struct dmatest_data *dst,
+				    unsigned int len,
+				    int timeout)
+{
+	struct dma_device *dma = chan->device;
+	struct dmatest_done *done = &thread->test_done;
+	struct idxd_desc *desc;
+	struct dsa_hw_desc *hw;
+	struct dma_async_tx_descriptor *tx;
+	enum dma_status status;
+	dma_cookie_t cookie;
+
+	desc = (struct idxd_desc *)dma->kdops.device_get_desc(chan, IDXD_OP_BLOCK);
+	if (IS_ERR(desc)) {
+		pr_warn("desc alloc failed\n");
+		return PTR_ERR(desc);
+	}
+
+	tx = &desc->txd;
+	tx->flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+	/* Fill the hardware descriptor directly (kdirect path). */
+	hw = desc->hw;
+	hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_RCI;
+	hw->opcode = DSA_OPCODE_MEMMOVE;
+	hw->src_addr = (u64)(src->aligned[0] + src->off);
+	hw->dst_addr = (u64)(dst->aligned[0] + dst->off);
+	hw->xfer_size = len;
+	hw->priv = 1;
+	hw->completion_addr = (u64)desc->completion;
+	/* int_handle and pasid filled by alloc_desc */
+
+	done->done = false;
+	tx->callback = idxd_ktest_callback;
+	tx->callback_param = done;
+
+	cookie = tx->tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		pr_warn("%s submit error!\n", __func__);
+		/* brief back-off, mirroring dmatest failure paths */
+		msleep(100);
+		return -ENXIO;
+	}
+
+	dma_async_issue_pending(chan);
+
+	wait_event_freezable_timeout(thread->done_wait, done->done, timeout);
+
+	if (!done->done) {
+		pr_warn("%s: test timed out!\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * The callback fired; confirm the engine actually reports the
+	 * cookie complete rather than errored.
+	 */
+	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+	if (status != DMA_COMPLETE) {
+		pr_warn("%s: dma status %d not complete\n", __func__, status);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/* Validate the type/mode module parameters; 0 if sane, -EINVAL otherwise. */
+static int param_check(void)
+{
+	int rc = 0;
+
+	if (type >= IDXD_TYPE_MAX || type < IDXD_TYPE_ANY) {
+		pr_warn("Device type (%d) invalid\n", type);
+		rc = -EINVAL;
+	} else if (mode < IDXD_WQ_ANY || mode >= IDXD_WQ_MAX) {
+		pr_warn("WQ mode (%d) invalid\n", mode);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Module init: validate parameters, build the WQ filter request, and
+ * register the kdirect ops with the dmatest harness.
+ *
+ * Fix vs. original: the register result was tested with an inverted
+ * condition (if (!rc) return rc;), so a registration failure returned
+ * 0, left the module loaded, and leaked @req. Propagate the error and
+ * free @req on failure instead.
+ */
+static int __init ktest_init(void)
+{
+	struct idxd_wq_request *req;
+	int rc;
+
+	rc = param_check();
+	if (rc)
+		return rc;
+
+	/* kzalloc: don't hand dmatest a request with uninitialized fields */
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	/* NOTE(review): assumes req->name is a large-enough array — confirm */
+	sprintf(req->name, "dmaengine");
+	req->node = node;
+	req->type = type;
+	req->mode = mode;
+	ktest_ops.filter_param = req;
+	ktest_ops.operation = submit_memmove_operation;
+	ktest_ops.dma_filter = idxd_filter_kdirect;
+
+	rc = dmatest_kdirect_register(&ktest_ops);
+	if (rc) {
+		ktest_ops.filter_param = NULL;
+		kfree(req);
+		return rc;
+	}
+
+	return 0;
+}
+module_init(ktest_init);
+
+/*
+ * Module teardown: unregister from the dmatest harness first so the
+ * filter_param is no longer referenced, then free it.
+ */
+static void __exit ktest_exit(void)
+{
+ dmatest_kdirect_unregister();
+ kfree(ktest_ops.filter_param);
+}
+module_exit(ktest_exit);