[v2 04/11] soc/fsl: Introduce drivers for the DPAA QMan

Roy Pledge Roy.Pledge at freescale.com
Thu Aug 13 06:14:50 AEST 2015


From: Geoff Thorpe <Geoff.Thorpe at freescale.com>

This driver enables the Freescale DPAA 1.0 Queue Manager block.
QMan is a hardware queue manager that allows accelerators connected
to the SoC datapath to enqueue and dequeue frames to/from hardware
queues, enabling data exchange between software and accelerators.
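
For context, the consumer-facing API exported by include/soc/fsl/qman.h
(added by this patch, not quoted below) follows a create/init/enqueue
pattern with a per-FQ DQRR callback invoked for each dequeued frame. The
sketch below is illustrative only; treat the prototypes and flag names as
approximations of the header rather than exact copies:

/*
 * Illustrative consumer-side sketch only: the authoritative prototypes
 * and flag names are in include/soc/fsl/qman.h from this series and may
 * differ slightly from what is shown here.
 */
#include <soc/fsl/qman.h>

static enum qman_cb_dqrr_result
example_dqrr_cb(struct qman_portal *p, struct qman_fq *fq,
		const struct qm_dqrr_entry *dq)
{
	/* dq->fd is the frame descriptor of the dequeued frame */
	return qman_cb_dqrr_consume;
}

static struct qman_fq example_fq = {
	.cb.dqrr = example_dqrr_cb,
};

static int example_fq_setup(void)
{
	struct qm_fd fd = { };
	int ret;

	/* allocate a dynamic FQID and bind our callbacks to it */
	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &example_fq);
	if (ret)
		return ret;
	/* schedule the FQ so dequeued frames reach example_dqrr_cb() */
	ret = qman_init_fq(&example_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
	if (ret)
		return ret;
	/* fill in fd (address, format, length), then hand it to hardware */
	return qman_enqueue(&example_fq, &fd, 0);
}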

Signed-off-by: Geoff Thorpe <Geoff.Thorpe at freescale.com>
Signed-off-by: Emil Medve <Emilian.Medve at Freescale.com>
Signed-off-by: Roy Pledge <Roy.Pledge at freescale.com>
---
 arch/powerpc/platforms/85xx/corenet_generic.c |   16 +
 arch/powerpc/platforms/85xx/p1023_rdb.c       |   14 +
 drivers/soc/fsl/qbman/Kconfig                 |   60 +-
 drivers/soc/fsl/qbman/Makefile                |    3 +
 drivers/soc/fsl/qbman/bman.c                  |    3 +-
 drivers/soc/fsl/qbman/bman_api.c              |    6 +-
 drivers/soc/fsl/qbman/dpaa_resource.c         |    7 +-
 drivers/soc/fsl/qbman/dpaa_sys.h              |   60 +-
 drivers/soc/fsl/qbman/qman.c                  | 1026 +++++++++
 drivers/soc/fsl/qbman/qman.h                  | 1128 ++++++++++
 drivers/soc/fsl/qbman/qman_api.c              | 2819 +++++++++++++++++++++++++
 drivers/soc/fsl/qbman/qman_driver.c           |   83 +
 drivers/soc/fsl/qbman/qman_portal.c           |  629 ++++++
 drivers/soc/fsl/qbman/qman_priv.h             |  279 +++
 drivers/soc/fsl/qbman/qman_utils.c            |  305 +++
 include/soc/fsl/qman.h                        | 1977 +++++++++++++++++
 16 files changed, 8407 insertions(+), 8 deletions(-)
 create mode 100644 drivers/soc/fsl/qbman/qman.c
 create mode 100644 drivers/soc/fsl/qbman/qman.h
 create mode 100644 drivers/soc/fsl/qbman/qman_api.c
 create mode 100644 drivers/soc/fsl/qbman/qman_driver.c
 create mode 100644 drivers/soc/fsl/qbman/qman_portal.c
 create mode 100644 drivers/soc/fsl/qbman/qman_priv.h
 create mode 100644 drivers/soc/fsl/qbman/qman_utils.c
 create mode 100644 include/soc/fsl/qman.h

diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index bd839dc..f24e52b 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -203,6 +203,21 @@ static int __init corenet_generic_probe(void)
 	return 0;
 }
 
+/* Early setup is required for large chunks of contiguous (and coarsely-aligned)
+ * memory. The following shoe-horns the QMan "init_early" call into the
+ * platform setup so that the driver can parse its CCSR node early on.
+ */
+#ifdef CONFIG_FSL_QMAN_CONFIG
+void __init qman_init_early(void);
+#endif
+
+__init void corenet_ds_init_early(void)
+{
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	qman_init_early();
+#endif
+}
+
 define_machine(corenet_generic) {
 	.name			= "CoreNet Generic",
 	.probe			= corenet_generic_probe,
@@ -221,6 +236,7 @@ define_machine(corenet_generic) {
 #else
 	.power_save		= e500_idle,
 #endif
+	.init_early		= corenet_ds_init_early,
 };
 
 machine_arch_initcall(corenet_generic, corenet_gen_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index d5b7509..56f7b2a 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -103,7 +103,20 @@ static int __init p1023_rdb_probe(void)
 	unsigned long root = of_get_flat_dt_root();
 
 	return of_flat_dt_is_compatible(root, "fsl,P1023RDB");
+}
+
+/* Early setup is required for large chunks of contiguous (and coarsely-aligned)
+ * memory. The following shoe-horns the QMan "init_early" call into the
+ * platform setup so that the driver can parse its CCSR node early on. */
+#ifdef CONFIG_FSL_QMAN_CONFIG
+void __init qman_init_early(void);
+#endif
 
+static __init void p1023_rdb_init_early(void)
+{
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	qman_init_early();
+#endif
 }
 
 define_machine(p1023_rdb) {
@@ -119,4 +132,5 @@ define_machine(p1023_rdb) {
 	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
 	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
 #endif
+	.init_early		= p1023_rdb_init_early,
 };
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index 9829b6f..1ff52a8 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -17,15 +17,71 @@ config FSL_DPA_CHECKING
 		any use of it by other code. Not recommended for performance
 
 config FSL_BMAN
-	tristate "BMan device management"
+	bool "BMan device management"
 	default n
 	help
 		FSL DPAA BMan driver
 
 config FSL_BMAN_PORTAL
-	tristate "BMan portal(s)"
+	bool "BMan portal(s)"
 	default n
 	help
 		FSL BMan portal driver
 
+config FSL_QMAN
+	bool "QMan device management"
+	default n
+	help
+		FSL DPAA QMan driver
+
+if FSL_QMAN
+
+config FSL_QMAN_CONFIG
+	bool "QMan device management"
+	default y
+	help
+	  If this linux image is running natively, you need this option. If this
+	  linux image is running as a guest OS under the hypervisor, only one
+	  guest OS ("the control plane") needs this option.
+
+config FSL_QMAN_TEST
+	tristate "QMan self-tests"
+	default n
+	help
+		Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+	bool "QMan high-level self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This requires the presence of cpu-affine portals, and performs
+	  high-level API testing with them (whichever portal(s) are affine to
+	  the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+	bool "QMan 'hot potato' data-stashing self-test"
+	depends on FSL_QMAN_TEST
+	default y
+	help
+	  This performs a "hot potato" style test enqueuing/dequeuing a frame
+	  across a series of FQs scheduled to different portals (and cpus), with
+	  DQRR, data and context stashing always on.
+
+config FSL_QMAN_DEBUGFS
+	tristate "QMan debugfs support"
+	depends on DEBUG_FS
+	default n
+	help
+		QMan debugfs support
+
+config FSL_QMAN_INIT_TIMEOUT
+	int "timeout for qman init stage, in seconds"
+	default 10
+	help
+	  The timeout, in seconds, after which a non-control partition quits
+	  its initialization loop if the control partition fails to boot.
+
+endif # FSL_QMAN
+
 endif # FSL_DPA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
index d5a595d..0d96598 100644
--- a/drivers/soc/fsl/qbman/Makefile
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -5,3 +5,6 @@ obj-$(CONFIG_FSL_BMAN)				+= bman.o
 obj-$(CONFIG_FSL_BMAN_PORTAL)			+= bman-portal.o
 bman-portal-y					 = bman_portal.o bman_api.o	\
 						   bman_utils.o
+
+obj-$(CONFIG_FSL_QMAN)				+= qman_api.o qman_utils.o qman_driver.o
+obj-$(CONFIG_FSL_QMAN_CONFIG)			+= qman.o qman_portal.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index d6e2204..e74f93c 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -540,7 +540,6 @@ static const struct of_device_id of_fsl_bman_ids[] = {
 	},
 	{}
 };
-MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
 
 static struct platform_driver of_fsl_bman_driver = {
 	.driver = {
@@ -551,4 +550,4 @@ static struct platform_driver of_fsl_bman_driver = {
 	.remove = of_fsl_bman_remove,
 };
 
-module_platform_driver(of_fsl_bman_driver);
+builtin_platform_driver(of_fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_api.c b/drivers/soc/fsl/qbman/bman_api.c
index 7b84e26..a389db9 100644
--- a/drivers/soc/fsl/qbman/bman_api.c
+++ b/drivers/soc/fsl/qbman/bman_api.c
@@ -65,7 +65,11 @@ struct bman_portal {
 	u8 alloced;
 };
 
+
 #ifdef FSL_DPA_PORTAL_SHARE
+/* For an explanation of the locking, redirection, or affine-portal logic,
+ * please consult the QMan driver for details. This is the same, only simpler
+ * (no fiddly QMan-specific bits.) */
 #define PORTAL_IRQ_LOCK(p, irqflags) \
 	do { \
 		if ((p)->is_shared) \
@@ -173,7 +177,7 @@ static void depletion_unlink(struct bman_pool *pool)
 	PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
 }
 
-/* In the case that the application's core loop calls
+/* In the case that the application's core loop calls qman_poll() and
  * bman_poll(), we ought to balance how often we incur the overheads of the
  * slow-path poll. We'll use two decrementer sources. The idle decrementer
  * constant is used when the last slow-poll detected no work to do, and the busy
diff --git a/drivers/soc/fsl/qbman/dpaa_resource.c b/drivers/soc/fsl/qbman/dpaa_resource.c
index 80d2394..b85c4a5 100644
--- a/drivers/soc/fsl/qbman/dpaa_resource.c
+++ b/drivers/soc/fsl/qbman/dpaa_resource.c
@@ -28,7 +28,10 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#if defined(CONFIG_FSL_BMAN_PORTAL) || defined(CONFIG_FSL_BMAN_PORTAL_MODULE)
+#if defined(CONFIG_FSL_BMAN_PORTAL) ||		\
+    defined(CONFIG_FSL_BMAN_PORTAL_MODULE) ||	\
+    defined(CONFIG_FSL_QMAN_PORTAL) ||		\
+    defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
 #include "dpaa_sys.h"
 
 /* The allocator is a (possibly-empty) list of these */
@@ -353,4 +356,4 @@ u32 dpaa_resource_release(struct dpaa_resource *alloc,
 	return total_invalid;
 }
 EXPORT_SYMBOL(dpaa_resource_release);
-#endif	/* CONFIG_FSL_BMAN_PORTAL* */
+#endif	/* CONFIG_FSL_*MAN_PORTAL* */
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index 6bf870a..d3d6fc4 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -157,10 +157,68 @@ static inline void copy_words(void *dest, const void *src, size_t sz)
 #endif
 
 /************/
+/* RB-trees */
+/************/
+
+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
+ * non-linux systems. This also encapsulates the extra plumbing that linux code
+ * usually provides when using RB-trees. This encapsulation assumes that the
+ * data type held by the tree is u32. */
+
+struct dpa_rbtree {
+	struct rb_root root;
+};
+#define DPA_RBTREE { .root = RB_ROOT }
+
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+	tree->root = RB_ROOT;
+}
+
+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+	struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
+	while (*p) { \
+		u32 item; \
+		parent = *p; \
+		item = rb_entry(parent, type, node_field)->val_field; \
+		if (obj->val_field < item) \
+			p = &parent->rb_left; \
+		else if (obj->val_field > item) \
+			p = &parent->rb_right; \
+		else \
+			return -EBUSY; \
+	} \
+	rb_link_node(&obj->node_field, parent, p); \
+	rb_insert_color(&obj->node_field, &tree->root); \
+	return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+	rb_erase(&obj->node_field, &tree->root); \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+	type *ret; \
+	struct rb_node *p = tree->root.rb_node; \
+	while (p) { \
+		ret = rb_entry(p, type, node_field); \
+		if (val < ret->val_field) \
+			p = p->rb_left; \
+		else if (val > ret->val_field) \
+			p = p->rb_right; \
+		else \
+			return ret; \
+	} \
+	return NULL; \
+}
+
+/************/
 /* Bootargs */
 /************/
 
-/* BMan has "bportals=", they use the same syntax
+/* QMan has "qportals=" and BMan has "bportals=", they use the same syntax
  * though; a comma-separated list of items, each item being a cpu index and/or a
  * range of cpu indices, and each item optionally be prefixed by "s" to indicate
  * that the portal associated with that cpu should be shared. See bman_driver.c
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000..17ddbf1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,1026 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <asm/cacheflush.h>
+
+/* Last updated for v00.800 of the BG */
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
+#define REG_DD_CFG		0x0200
+#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC		0x0400
+#define REG_PFDR_FP_HEAD	0x0404
+#define REG_PFDR_FP_TAIL	0x0408
+#define REG_PFDR_FP_LWIT	0x0410
+#define REG_PFDR_CFG		0x0414
+#define REG_SFDR_CFG		0x0500
+#define REG_SFDR_IN_USE		0x0504
+#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID	0x0630
+#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG		0x0800
+#define REG_ECSR		0x0a00
+#define REG_ECIR		0x0a04
+#define REG_EADR		0x0a08
+#define REG_ECIR2		0x0a0c
+#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
+#define REG_MCR			0x0b00
+#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG		0x0be0
+#define REG_HID_CFG		0x0bf0
+#define REG_IDLE_STAT		0x0bf4
+#define REG_IP_REV_1		0x0bf8
+#define REG_IP_REV_2		0x0bfc
+#define REG_FQD_BARE		0x0c00
+#define REG_PFDR_BARE		0x0c20
+#define REG_offset_BAR		0x0004	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR		0x0010	/* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE		0x0c80
+#define REG_QCSP_BAR		0x0c84
+#define REG_CI_SCHED_CFG	0x0d00
+#define REG_SRCIDR		0x0d04
+#define REG_LIODNR		0x0d08
+#define REG_CI_RLM_AVG		0x0d14
+#define REG_ERR_ISR		0x0e00	/* + "enum qm_isr_reg" */
+#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n)	(0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n)	(0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR		0x01000000
+#define MCR_get_rslt(v)		(u8)((v) >> 24)
+#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r)		((r) == 0xf0)
+#define MCR_rslt_eaccess(r)	((r) == 0xf8)
+#define MCR_rslt_inval(r)	((r) == 0xff)
+
+/* Corenet initiator settings. Stash request queues are 4-deep to match the
+   cores' ability to snarf. Stash priority is 3, other priorities are 2. */
+#define FSL_QMAN_CI_SCHED_CFG_SRCCIV   4
+#define FSL_QMAN_CI_SCHED_CFG_SRQ_W    3
+#define FSL_QMAN_CI_SCHED_CFG_RW_W     2
+#define FSL_QMAN_CI_SCHED_CFG_BMAN_W   2
+
+struct qman;
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+	qm_wq_portal = 0,
+	qm_wq_pool = 1,
+	qm_wq_fman0 = 2,
+	qm_wq_fman1 = 3,
+	qm_wq_caam = 4,
+	qm_wq_pme = 5,
+	qm_wq_first = qm_wq_portal,
+	qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+	qm_memory_fqd,
+	qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE	0x20000000	/* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE	0x10000000	/* Corenet Target Data Error */
+#define QM_EIRQ_CITT	0x08000000	/* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI	0x04000000	/* PFDR Low Watermark */
+#define QM_EIRQ_MBEI	0x02000000	/* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI	0x01000000	/* Single-bit ECC Error */
+#define QM_EIRQ_PEBI	0x00800000	/* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI	0x00020000	/* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI	0x00010000	/* Invalid Command Verb */
+#define QM_EIRQ_IDDI	0x00000800	/* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI	0x00000400	/* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI	0x00000200	/* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI	0x00000100	/* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE	0x00000010	/* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI	0x00000008	/* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI	0x00000004	/* Invalid Enqueue State */
+#define QM_EIRQ_IECI	0x00000002	/* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI	0x00000001	/* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+				QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+				QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+			QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+			QM_EIRQ_IFSI)
+
+union qman_ecir {
+	u32 ecir_raw;
+	struct {
+		u32 __reserved:2;
+		u32 portal_type:1;
+		u32 portal_num:5;
+		u32 fqid:24;
+	} __packed info;
+};
+
+union qman_ecir2 {
+	u32 ecir2_raw;
+	struct {
+		u32 portal_type:1;
+		u32 __reserved:21;
+		u32 portal_num:10;
+	} __packed info;
+};
+
+union qman_eadr {
+	u32 eadr_raw;
+	struct {
+		u32 __reserved1:4;
+		u32 memid:4;
+		u32 __reserved2:12;
+		u32 eadr:12;
+	} __packed info;
+	struct {
+		u32 __reserved1:3;
+		u32 memid:5;
+		u32 __reserved:8;
+		u32 eadr:16;
+	} __packed info_rev3;
+};
+
+struct qman_hwerr_txt {
+	u32 mask;
+	const char *txt;
+};
+
+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+	QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
+	QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
+	QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
+	QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
+	QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+	QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+	QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
+	QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
+	QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
+	QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
+	QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
+	QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
+	QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
+	QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
+	QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
+	QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
+	QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
+	QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
+};
+#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt))
+
+struct qman_error_info_mdata {
+	u16 addr_mask;
+	u16 bits;
+	const char *txt;
+};
+
+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct qman_error_info_mdata error_mdata[] = {
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
+	QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
+	QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
+	QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
+	QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
+	QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
+	QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
+	QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"),
+};
+#define QMAN_ERR_MDATA_COUNT \
+	(sizeof(error_mdata)/sizeof(struct qman_error_info_mdata))
+
+/* TODO: make this mask configurable via Kconfig */
+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/**
+ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define qm_err_isr_status_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_status)
+#define qm_err_isr_status_clear(qm, m)	\
+		__qm_err_isr_write(qm, qm_isr_status, m)
+#define qm_err_isr_enable_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_enable)
+#define qm_err_isr_enable_write(qm, v)	\
+		__qm_err_isr_write(qm, qm_isr_enable, v)
+#define qm_err_isr_disable_read(qm)	\
+		__qm_err_isr_read(qm, qm_isr_disable)
+#define qm_err_isr_disable_write(qm, v)	\
+		__qm_err_isr_write(qm, qm_isr_disable, v)
+#define qm_err_isr_inhibit(qm)		\
+		__qm_err_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_err_isr_uninhibit(qm)	\
+		__qm_err_isr_write(qm, qm_isr_inhibit, 0)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Encapsulate "struct qman *" as a cast of the register space address. */
+
+static struct qman *qm_create(void *regs)
+{
+	return (struct qman *)regs;
+}
+
+static inline u32 __qm_in(struct qman *qm, u32 offset)
+{
+	return ioread32be((void *)qm + offset);
+}
+static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
+{
+	iowrite32be(val, (void *)qm + offset);
+}
+#define qm_in(reg)		__qm_in(qm, REG_##reg)
+#define qm_out(reg, val)	__qm_out(qm, REG_##reg, val)
+
+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
+{
+	return __qm_in(qm, REG_ERR_ISR + (n << 2));
+}
+
+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
+{
+	__qm_out(qm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
+			int ed, u8 sernd)
+{
+	DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
+			(portal == qm_dc_portal_fman1));
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+	else
+		qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
+			u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
+			u8 csw6, u8 csw7)
+{
+	qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+		((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+		((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+		((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(struct qman *qm)
+{
+	qm_out(HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(struct qman *qm)
+{
+	qm_out(CI_SCHED_CFG,
+	       0x80000000 | /* write srcciv enable */
+	       (FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
+	       (FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
+	       (FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
+	       FSL_QMAN_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor)
+{
+	u32 v = qm_in(IP_REV_1);
+	*id = (v >> 16);
+	*major = (v >> 8) & 0xff;
+	*minor = v & 0xff;
+}
+
+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
+			int enable, int prio, int stash, u32 size)
+{
+	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+	u32 exp = ilog2(size);
+	/* choke if size isn't within range */
+	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+			is_power_of_2(size));
+	/* choke if 'ba' has lower-alignment than 'size' */
+	DPA_ASSERT(!(ba & (size - 1)));
+	__qm_out(qm, offset, upper_32_bits(ba));
+	__qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
+	__qm_out(qm, offset + REG_offset_AR,
+		(enable ? 0x80000000 : 0) |
+		(prio ? 0x40000000 : 0) |
+		(stash ? 0x20000000 : 0) |
+		(exp - 1));
+}
+
+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
+{
+	qm_out(PFDR_FP_LWIT, th & 0xffffff);
+	qm_out(PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
+{
+	qm_out(SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
+{
+	u8 rslt = MCR_get_rslt(qm_in(MCR));
+
+	DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+	/* Make sure the command interface is 'idle' */
+	if (!MCR_rslt_idle(rslt))
+		panic("QMAN_MCR isn't idle");
+
+	/* Write the MCR command params then the verb */
+	qm_out(MCP(0), pfdr_start);
+	/* TODO: remove this - it's a workaround for a model bug that is
+	 * corrected in more recent versions. We use the workaround until
+	 * everyone has upgraded. */
+	qm_out(MCP(1), (pfdr_start + num - 16));
+	lwsync();
+	qm_out(MCR, MCR_INIT_PFDR);
+	/* Poll for the result */
+	do {
+		rslt = MCR_get_rslt(qm_in(MCR));
+	} while (!MCR_rslt_idle(rslt));
+	if (MCR_rslt_ok(rslt))
+		return 0;
+	if (MCR_rslt_eaccess(rslt))
+		return -EACCES;
+	if (MCR_rslt_inval(rslt))
+		return -EINVAL;
+	pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+	return -ENODEV;
+}
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* We support only one of these */
+static struct qman *qm;
+static struct device_node *qm_node;
+
+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
+ * during qman_init_ccsr(). */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+	fqd_a = rmem->base;
+	fqd_sz = rmem->size;
+
+	WARN_ON(!(fqd_a && fqd_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+size_t qman_fqd_size(void)
+{
+	return fqd_sz;
+}
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+	pfdr_a = rmem->base;
+	pfdr_sz = rmem->size;
+
+	WARN_ON(!(pfdr_a && pfdr_sz));
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
+
+/* If requested, zero the memory range (unless the node carries a "zero-pma"
+ * property) and flush it from the data cache so that QMan-originated
+ * transactions for this region can be marked non-coherent. The location and
+ * size themselves come from the reserved-memory nodes handled above.
+ */
+static __init int parse_mem_property(struct device_node *node,
+				     dma_addr_t *addr, size_t *sz, int zero)
+{
+	int ret;
+
+	/* If using a "zero-pma", don't try to zero it, even if you asked */
+	if (zero && of_find_property(node, "zero-pma", &ret)) {
+		pr_info("  it's a 'zero-pma', not zeroing from s/w\n");
+		zero = 0;
+	}
+
+	if (zero) {
+		/* map as cacheable, non-guarded */
+		void __iomem *tmpp = ioremap_prot(*addr, *sz, 0);
+
+		memset_io(tmpp, 0, *sz);
+		flush_dcache_range((unsigned long)tmpp,
+				   (unsigned long)tmpp + *sz);
+		iounmap(tmpp);
+	}
+
+	return 0;
+}
+
+/* TODO:
+ * - there is obviously no handling of errors,
+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
+ *   both memory resources to zero.
+ */
+static int __init fsl_qman_init(struct device_node *node)
+{
+	struct resource res;
+	u32 __iomem *regs;
+	const char *s;
+	int ret, standby = 0;
+	u16 id;
+	u8 major, minor;
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		pr_err("Can't get %s property 'reg'\n", node->full_name);
+		return ret;
+	}
+	s = of_get_property(node, "fsl,hv-claimable", &ret);
+	if (s && !strcmp(s, "standby"))
+		standby = 1;
+	if (!standby) {
+		ret = parse_mem_property(node, &fqd_a, &fqd_sz, 1);
+		BUG_ON(ret);
+		ret = parse_mem_property(node, &pfdr_a, &pfdr_sz, 0);
+		BUG_ON(ret);
+	}
+	/* Global configuration */
+	regs = ioremap(res.start, res.end - res.start + 1);
+	qm = qm_create(regs);
+	qm_node = node;
+	qm_get_version(qm, &id, &major, &minor);
+	pr_info("Ver: %04x,%02x,%02x\n", id, major, minor);
+	if (!qman_ip_rev) {
+		if ((major == 1) && (minor == 0)) {
+			pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+			iounmap(regs);
+			return -ENODEV;
+		} else if ((major == 1) && (minor == 1))
+			qman_ip_rev = QMAN_REV11;
+		else if	((major == 1) && (minor == 2))
+			qman_ip_rev = QMAN_REV12;
+		else if ((major == 2) && (minor == 0))
+			qman_ip_rev = QMAN_REV20;
+		else if ((major == 3) && (minor == 0))
+			qman_ip_rev = QMAN_REV30;
+		else if ((major == 3) && (minor == 1))
+			qman_ip_rev = QMAN_REV31;
+		else {
+			pr_warn("Unknown version, default to rev1.1\n");
+			qman_ip_rev = QMAN_REV11;
+		}
+	}
+
+	if (standby) {
+		pr_info("  -> in standby mode\n");
+		return 0;
+	}
+	return 0;
+}
+
+int qman_have_ccsr(void)
+{
+	return qm ? 1 : 0;
+}
+
+__init void qman_init_early(void)
+{
+	struct device_node *dn;
+	int ret;
+
+	for_each_compatible_node(dn, NULL, "fsl,qman") {
+		if (qm)
+			pr_err("%s: only one 'fsl,qman' allowed\n",
+				dn->full_name);
+		else {
+			if (!of_device_is_available(dn))
+				continue;
+
+			ret = fsl_qman_init(dn);
+			BUG_ON(ret);
+		}
+	}
+}
+
+static void log_edata_bits(u32 bit_count)
+{
+	u32 i, j, mask = 0xffffffff;
+
+	pr_warn("ErrInt, EDATA:\n");
+	i = bit_count/32;
+	if (bit_count%32) {
+		i++;
+		mask = ~(mask << bit_count%32);
+	}
+	j = 16-i;
+	pr_warn("  0x%08x\n", qm_in(EDATA(j)) & mask);
+	j++;
+	for (; j < 16; j++)
+		pr_warn("  0x%08x\n", qm_in(EDATA(j)));
+}
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+	union qman_ecir ecir_val;
+	union qman_eadr eadr_val;
+
+	ecir_val.ecir_raw = qm_in(ECIR);
+	/* Is portal info valid */
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		union qman_ecir2 ecir2_val;
+
+		ecir2_val.ecir2_raw = qm_in(ECIR2);
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			pr_warn("ErrInt: %s id %d\n",
+				ecir2_val.info.portal_type ? "DCP" : "SWP",
+				ecir2_val.info.portal_num);
+		}
+		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+			pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.eadr_raw = qm_in(EADR);
+			pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+				error_mdata[eadr_val.info_rev3.memid].txt,
+				error_mdata[eadr_val.info_rev3.memid].addr_mask
+					& eadr_val.info_rev3.eadr);
+			log_edata_bits(
+				error_mdata[eadr_val.info_rev3.memid].bits);
+		}
+	} else {
+		if (ecsr_val & PORTAL_ECSR_ERR) {
+			pr_warn("ErrInt: %s id %d\n",
+				ecir_val.info.portal_type ? "DCP" : "SWP",
+				ecir_val.info.portal_num);
+		}
+		if (ecsr_val & FQID_ECSR_ERR)
+			pr_warn("ErrInt: ecir.fqid 0x%x\n", ecir_val.info.fqid);
+
+		if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+			eadr_val.eadr_raw = qm_in(EADR);
+			pr_warn("ErrInt: EADR Memory: %s, 0x%x\n",
+				error_mdata[eadr_val.info.memid].txt,
+				error_mdata[eadr_val.info.memid].addr_mask
+					& eadr_val.info.eadr);
+			log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+		}
+	}
+}
+
+/* QMan interrupt handler */
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+	ier_val = qm_err_isr_enable_read(qm);
+	isr_val = qm_err_isr_status_read(qm);
+	ecsr_val = qm_in(ECSR);
+	isr_mask = isr_val & ier_val;
+
+	if (!isr_mask)
+		return IRQ_NONE;
+	for (i = 0; i < QMAN_HWE_COUNT; i++) {
+		if (qman_hwerr_txts[i].mask & isr_mask) {
+			pr_warn("ErrInt: %s\n", qman_hwerr_txts[i].txt);
+			if (qman_hwerr_txts[i].mask & ecsr_val) {
+				log_additional_error_info(isr_mask, ecsr_val);
+				/* Re-arm error capture registers */
+				qm_out(ECSR, ecsr_val);
+			}
+			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
+				pr_devel("Un-enabling error 0x%x\n",
+					 qman_hwerr_txts[i].mask);
+				ier_val &= ~qman_hwerr_txts[i].mask;
+				qm_err_isr_enable_write(qm, ier_val);
+			}
+		}
+	}
+	qm_err_isr_status_clear(qm, isr_val);
+	return IRQ_HANDLED;
+}
+
+static int __bind_irq(void)
+{
+	int ret, err_irq;
+
+	err_irq = of_irq_to_resource(qm_node, 0, NULL);
+	if (err_irq == NO_IRQ) {
+		pr_info("Can't get %s property 'interrupts'\n",
+			qm_node->full_name);
+		return -ENODEV;
+	}
+	ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
+	if (ret)  {
+		pr_err("request_irq() failed %d for '%s'\n",
+		       ret, qm_node->full_name);
+		return -ENODEV;
+	}
+	/* Write-to-clear any stale bits, (eg. starvation being asserted prior
+	 * to resource allocation during driver init). */
+	qm_err_isr_status_clear(qm, 0xffffffff);
+	/* Enable Error Interrupts */
+	qm_err_isr_enable_write(qm, 0xffffffff);
+	return 0;
+}
+
+int qman_init_ccsr(struct device_node *node)
+{
+	int ret;
+
+	if (!qman_have_ccsr())
+		return 0;
+	if (node != qm_node)
+		return -EINVAL;
+	/* FQD memory */
+	qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
+	/* PFDR memory */
+	qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
+	qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+	/* thresholds */
+	qm_set_pfdr_threshold(qm, 512, 64);
+	qm_set_sfdr_threshold(qm, 128);
+	/* clear stale PEBI bit from interrupt status register */
+	qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
+	/* corenet initiator settings */
+	qm_set_corenet_initiator(qm);
+	/* HID settings */
+	qm_set_hid(qm);
+	/* Set scheduling weights to defaults */
+	for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
+		qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
+	/* We are not prepared to accept ERNs for hardware enqueues */
+	qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
+	qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
+	/* Initialise Error Interrupt Handler */
+	ret = __bind_irq();
+	if (ret)
+		return ret;
+	return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+	static int done;
+	static u32 liodn_offset;
+	u32 before, after;
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+	if (!qman_have_ccsr())
+		return;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		before = qm_in(REV3_QCSP_LIO_CFG(idx));
+	else
+		before = qm_in(QCSP_LIO_CFG(idx));
+	if (!done) {
+		liodn_offset = before & LIO_CFG_LIODN_MASK;
+		done = 1;
+		return;
+	}
+	after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		qm_out(REV3_QCSP_LIO_CFG(idx), after);
+	else
+		qm_out(QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+	int idx = channel - QM_CHANNEL_SWPORTAL0;
+	u32 before, after;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+		before = qm_in(REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+		cpu_idx /= 2;
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_out(REV3_QCSP_IO_CFG(idx), after);
+	} else {
+		before = qm_in(QCSP_IO_CFG(idx));
+		after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+		qm_out(QCSP_IO_CFG(idx), after);
+	}
+	return 0;
+}
+
+#define MISC_CFG_WPM_MASK 0x00000002
+int qm_set_wpm(int wpm)
+{
+	u32 before;
+	u32 after;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	before = qm_in(MISC_CFG);
+	after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
+	qm_out(MISC_CFG, after);
+	return 0;
+}
+
+int qm_get_wpm(int *wpm)
+{
+	u32 before;
+
+	if (!qman_have_ccsr())
+		return -ENODEV;
+
+	before = qm_in(MISC_CFG);
+	*wpm = (before & MISC_CFG_WPM_MASK) >> 1;
+	return 0;
+}
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME	"fsl-qman"
+
+static ssize_t show_pfdr_fpc(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
+};
+
+static ssize_t show_dlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data;
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+		return -EINVAL;
+	data = qm_in(DCP_DLM_AVG(i));
+	return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+			(data & 0x000000ff)*390625);
+};
+
+static ssize_t set_dlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i) != 1)
+		return -EINVAL;
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(DCP_DLM_AVG(i), val);
+	return count;
+};
+
+static ssize_t show_pfdr_cfg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
+};
+
+static ssize_t set_pfdr_cfg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(PFDR_CFG, val);
+	return count;
+};
+
+static ssize_t show_sfdr_in_use(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
+};
+
+static ssize_t show_idle_stat(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
+};
+
+static ssize_t show_ci_rlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	u32 data = qm_in(CI_RLM_AVG);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+			(data & 0x000000ff)*390625);
+};
+
+static ssize_t set_ci_rlm_avg(struct device *dev,
+	struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val)) {
+		dev_dbg(dev, "invalid input %s\n", buf);
+		return -EINVAL;
+	}
+	qm_out(CI_RLM_AVG, val);
+	return count;
+};
+
+static ssize_t show_err_isr(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
+};
+
+
+static ssize_t show_sbec(struct device *dev,
+	struct device_attribute *dev_attr, char *buf)
+{
+	int i;
+
+	if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+		return -EINVAL;
+	return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
+		show_ci_rlm_avg, set_ci_rlm_avg);
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
+
+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *qman_dev_attributes[] = {
+	&dev_attr_pfdr_fpc.attr,
+	&dev_attr_pfdr_cfg.attr,
+	&dev_attr_idle_stat.attr,
+	&dev_attr_ci_rlm_avg.attr,
+	&dev_attr_err_isr.attr,
+	&dev_attr_dcp0_dlm_avg.attr,
+	&dev_attr_dcp1_dlm_avg.attr,
+	&dev_attr_dcp2_dlm_avg.attr,
+	&dev_attr_dcp3_dlm_avg.attr,
+	/* sfdr_in_use will be added if necessary */
+	NULL
+};
+
+static struct attribute *qman_dev_ecr_attributes[] = {
+	&dev_attr_sbec_0.attr,
+	&dev_attr_sbec_1.attr,
+	&dev_attr_sbec_2.attr,
+	&dev_attr_sbec_3.attr,
+	&dev_attr_sbec_4.attr,
+	&dev_attr_sbec_5.attr,
+	&dev_attr_sbec_6.attr,
+	&dev_attr_sbec_7.attr,
+	&dev_attr_sbec_8.attr,
+	&dev_attr_sbec_9.attr,
+	&dev_attr_sbec_10.attr,
+	&dev_attr_sbec_11.attr,
+	&dev_attr_sbec_12.attr,
+	&dev_attr_sbec_13.attr,
+	&dev_attr_sbec_14.attr,
+	NULL
+};
+
+/* root level */
+static const struct attribute_group qman_dev_attr_grp = {
+	.name = NULL,
+	.attrs = qman_dev_attributes
+};
+static const struct attribute_group qman_dev_ecr_grp = {
+	.name = "error_capture",
+	.attrs = qman_dev_ecr_attributes
+};
+
+static int of_fsl_qman_remove(struct platform_device *ofdev)
+{
+	sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+	return 0;
+};
+
+static int of_fsl_qman_probe(struct platform_device *ofdev)
+{
+	int ret;
+	struct device *dev = &ofdev->dev;
+
+	ret = sysfs_create_group(&dev->kobj, &qman_dev_attr_grp);
+	if (ret)
+		goto done;
+	ret = sysfs_add_file_to_group(&dev->kobj,
+		&dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
+	if (ret)
+		goto del_group_0;
+	ret = sysfs_create_group(&dev->kobj, &qman_dev_ecr_grp);
+	if (ret)
+		goto del_group_0;
+
+	goto done;
+
+del_group_0:
+	sysfs_remove_group(&dev->kobj, &qman_dev_attr_grp);
+done:
+	if (ret)
+		dev_err(dev, "Cannot create dev attributes ret=%d\n", ret);
+	return ret;
+};
+
+static const struct of_device_id of_fsl_qman_ids[] = {
+	{
+		.compatible = "fsl,qman",
+	},
+	{}
+};
+
+static struct platform_driver of_fsl_qman_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_fsl_qman_ids,
+	},
+	.probe = of_fsl_qman_probe,
+	.remove	= of_fsl_qman_remove,
+};
+
+builtin_platform_driver(of_fsl_qman_driver);
+
+#endif /* CONFIG_SYSFS */
diff --git a/drivers/soc/fsl/qbman/qman.h b/drivers/soc/fsl/qbman/qman.h
new file mode 100644
index 0000000..7aaaa8f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.h
@@ -0,0 +1,1128 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x0000
+#define QM_REG_EQCR_CI_CINH	0x0004
+#define QM_REG_EQCR_ITR		0x0008
+#define QM_REG_DQRR_PI_CINH	0x0040
+#define QM_REG_DQRR_CI_CINH	0x0044
+#define QM_REG_DQRR_ITR		0x0048
+#define QM_REG_DQRR_DCAP	0x0050
+#define QM_REG_DQRR_SDQCR	0x0054
+#define QM_REG_DQRR_VDQCR	0x0058
+#define QM_REG_DQRR_PDQCR	0x005c
+#define QM_REG_MR_PI_CINH	0x0080
+#define QM_REG_MR_CI_CINH	0x0084
+#define QM_REG_MR_ITR		0x0088
+#define QM_REG_CFG		0x0100
+#define QM_REG_ISR		0x0e00
+#define QM_REG_IIR		0x0e0c
+#define QM_REG_ITPR		0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3100
+#define QM_CL_DQRR_PI_CENA	0x3200
+#define QM_CL_DQRR_CI_CENA	0x3300
+#define QM_CL_MR_PI_CENA	0x3400
+#define QM_CL_MR_CI_CENA	0x3500
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces,
+ * which simply ensure that the compiler treats the portal registers as
+ * volatile (i.e. non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o)		__raw_readl((qm)->addr_ci + (o))
+#define __qm_out(qm, o, val)	__raw_writel((val), (qm)->addr_ci + (o))
+#define qm_in(reg)		__qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val)	__qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
+#define __qm_cl_in(qm, o)	__raw_readl((qm)->addr_ce + (o))
+#define __qm_cl_out(qm, o, val) \
+	do { \
+		u32 *__tmpclout = (qm)->addr_ce + (o); \
+		__raw_writel((val), __tmpclout); \
+		dcbf(__tmpclout); \
+	} while (0)
+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg)	    __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+	__qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+	/* 'first' is included, 'last' is excluded */
+	if (first <= last)
+		return last - first;
+	return ringsize + last - first;
+}
+
+/* Portal modes.
+ *   Enum types:
+ *     pmode == production mode
+ *     cmode == consumption mode,
+ *     dmode == h/w dequeue mode.
+ *   Enum values use 3 letter codes. First letter matches the portal mode,
+ *   remaining two letters indicate:
+ *     ci == cache-inhibited portal register
+ *     ce == cache-enabled portal register
+ *     vb == in-band valid-bit (cache-enabled)
+ *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
+	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
+	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
+	qm_eqcr_pvb = 2		/* valid-bit */
+};
+enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
+	qm_dqrr_dpush = 0,	/* SDQCR  + VDQCR */
+	qm_dqrr_dpull = 1	/* PDQCR */
+};
+enum qm_dqrr_pmode {		/* s/w-only */
+	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
+	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
+	qm_dqrr_pvb		/* reads valid-bit */
+};
+enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
+	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
+	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
+	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode {		/* s/w-only */
+	qm_mr_pci,		/* reads MR_PI_CINH */
+	qm_mr_pce,		/* reads MR_PI_CENA */
+	qm_mr_pvb		/* reads valid-bit */
+};
+enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
+	qm_mr_cci = 0,		/* CI index, cache-inhibited */
+	qm_mr_cce = 1		/* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE		8
+#define QM_DQRR_SIZE		16
+#define QM_MR_SIZE		8
+
+struct qm_eqcr {
+	struct qm_eqcr_entry *ring, *cursor;
+	u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	u32 busy;
+	enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+	const struct qm_dqrr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum qm_dqrr_dmode dmode;
+	enum qm_dqrr_pmode pmode;
+	enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+	const struct qm_mr_entry *ring, *cursor;
+	u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum qm_mr_pmode pmode;
+	enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+	struct qm_mc_command *cr;
+	struct qm_mc_result *rr;
+	u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	enum {
+		/* Can be _mc_start()ed */
+		qman_mc_idle,
+		/* Can be _mc_commit()ed or _mc_abort()ed */
+		qman_mc_user,
+		/* Can only be _mc_retry()ed */
+		qman_mc_hw
+	} state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+	void __iomem *addr_ce;	/* cache-enabled */
+	void __iomem *addr_ci;	/* cache-inhibited */
+};
+
+struct qm_portal {
+	/* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for a concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ... */
+	struct qm_addr addr;
+	struct qm_eqcr eqcr;
+	struct qm_dqrr dqrr;
+	struct qm_mr mr;
+	struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+/* --- EQCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+	/* NB: this is odd-looking, but experiments show that it generates fast
+	 * code with essentially no branching overheads. We increment to the
+	 * next EQCR pointer and handle overflow and 'vbit'. */
+	struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+	eqcr->cursor = EQCR_CARRYCLEAR(partial);
+	if (partial != eqcr->cursor)
+		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+				enum qm_eqcr_pmode pmode,
+				unsigned int eq_stash_thresh,
+				int eq_stash_prio)
+{
+	/* This use of 'register', as well as all other occurrences, is because
+	 * it has been observed to generate much faster code with gcc than is
+	 * otherwise the case. */
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u32 cfg;
+	u8 pi;
+
+	eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
+	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(EQCR_CI);
+	pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	eqcr->cursor = eqcr->ring + pi;
+	eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+			QM_EQCR_VERB_VBIT : 0;
+	eqcr->available = QM_EQCR_SIZE - 1 -
+			qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+	eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+	eqcr->pmode = pmode;
+#endif
+	cfg = (qm_in(CFG) & 0x00ffffff) |
+		(eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+		(eq_stash_prio << 26)	| /* QCSP_CFG: EP */
+		((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
+	qm_out(CFG, cfg);
+	return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+	return (qm_in(CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+	u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+	DPA_ASSERT(!eqcr->busy);
+	if (pi != EQCR_PTR2IDX(eqcr->cursor))
+		pr_crit("losing uncommitted EQCR entries\n");
+	if (ci != eqcr->ci)
+		pr_crit("missing existing EQCR completions\n");
+	if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+		pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+								 *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(!eqcr->busy);
+	if (!eqcr->available)
+		return NULL;
+
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+								*portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci;
+
+	DPA_ASSERT(!eqcr->busy);
+	if (!eqcr->available) {
+		old_ci = eqcr->ci;
+		eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+		diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+		eqcr->available += diff;
+		if (!diff)
+			return NULL;
+	}
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 1;
+#endif
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+					struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->busy);
+	DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+	if (eqcr->available == 1)
+		return NULL;
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	dcbf(eqcr->cursor);
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbz_64(eqcr->cursor);
+	return eqcr->cursor;
+}
+
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+	DPA_ASSERT(eqcr->busy); \
+	DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
+	DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
+} while (0)
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbf(eqcr->cursor);
+	hwsync();
+	qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+	qm_cl_invalidate(EQCR_PI);
+	qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+	eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	EQCR_INC(eqcr);
+	eqcr->available--;
+	dcbf(eqcr->cursor);
+	lwsync();
+	qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	struct qm_eqcr_entry *eqcursor;
+
+	EQCR_COMMIT_CHECKS(eqcr);
+	DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+	lwsync();
+	eqcursor = eqcr->cursor;
+	eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+	dcbf(eqcursor);
+	EQCR_INC(eqcr);
+	eqcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	eqcr->busy = 0;
+#endif
+}
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+	diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+	u8 diff, old_ci = eqcr->ci;
+
+	eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+	qm_cl_invalidate(EQCR_CI);
+	diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+	eqcr->available += diff;
+	return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	eqcr->ithresh = ithresh;
+	qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+	register struct qm_eqcr *eqcr = &portal->eqcr;
+
+	return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+/* FIXME: many possible improvements;
+ * - look at changing the API to use pointer rather than index parameters now
+ *   that 'cursor' is a pointer,
+ * - consider moving other parameters to pointer if it could help (ci)
+ */
+
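+/* DQRR entries are 64 bytes and the ring is aligned to its total size, so
+ * clearing the (QM_DQRR_SIZE << 6) bit of an incremented entry pointer wraps
+ * it back to the start of the ring; the same trick converts an entry pointer
+ * to its ring index. */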
+#define DQRR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline const struct qm_dqrr_entry *DQRR_INC(
+						const struct qm_dqrr_entry *e)
+{
+	return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+	qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+		((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+				const struct qm_portal_config *config,
+				enum qm_dqrr_dmode dmode,
+				__maybe_unused enum qm_dqrr_pmode pmode,
+				enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u32 cfg;
+
+	/* Make sure the DQRR will be idle when we enable */
+	qm_out(DQRR_SDQCR, 0);
+	qm_out(DQRR_VDQCR, 0);
+	qm_out(DQRR_PDQCR, 0);
+	dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
+	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+	dqrr->cursor = dqrr->ring + dqrr->ci;
+	dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+	dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+			QM_DQRR_VERB_VBIT : 0;
+	dqrr->ithresh = qm_in(DQRR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	dqrr->dmode = dmode;
+	dqrr->pmode = pmode;
+	dqrr->cmode = cmode;
+#endif
+	/* Invalidate every ring entry before beginning */
+	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+		dcbi(qm_cl(dqrr->ring, cfg));
+	cfg = (qm_in(CFG) & 0xff000f00) |
+		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+		((dmode & 1) << 18) |			/* DP */
+		((cmode & 3) << 16) |			/* DCM */
+		0xa0 |					/* RE+SE */
+		(0 ? 0x40 : 0) |			/* Ignore RP */
+		(0 ? 0x10 : 0);				/* Ignore SP */
+	qm_out(CFG, cfg);
+	qm_dqrr_set_maxfill(portal, max_fill);
+	return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if ((dqrr->cmode != qm_dqrr_cdc) &&
+			(dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+		pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+						struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	if (!dqrr->fill)
+		return NULL;
+	return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->fill);
+	dqrr->cursor = DQRR_INC(dqrr->cursor);
+	return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 diff, old_pi = dqrr->pi;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+	dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+	diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+	dqrr->fill += diff;
+	return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+	qm_cl_invalidate(DQRR_PI);
+	qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 diff, old_pi = dqrr->pi;
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+	dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+	diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+	dqrr->fill += diff;
+	return diff;
+}
+
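+/* In PVB (producer valid-bit) mode, a new DQRR entry is detected by its verb
+ * byte carrying the expected valid bit; the bit QMan writes toggles on every
+ * wrap of the ring, so software flips its expected value whenever the producer
+ * index wraps back to zero. */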
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+	const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+	DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+	/* when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads". */
+	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+		if (!dqrr->pi)
+			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+		dqrr->fill++;
+	}
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+	dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+	qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+	dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+	qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	qm_cl_invalidate(DQRR_CI);
+	qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+	qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+	dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+	qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
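+/* In CDC (discrete consumption) mode there is no software consumer index;
+ * entries are acknowledged individually (or as a bitmask) by writing their
+ * index to the DQRR_DCAP register. */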
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+					int park)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(DQRR_DCAP, (0 << 8) |	/* S */
+		((park ? 1 : 0) << 6) |	/* PK */
+		idx);			/* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+					const struct qm_dqrr_entry *dq,
+					int park)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+	u8 idx = DQRR_PTR2IDX(dq);
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	DPA_ASSERT((dqrr->ring + idx) == dq);
+	DPA_ASSERT(idx < QM_DQRR_SIZE);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* DQRR_DCAP::S */
+		((park ? 1 : 0) << 6) |		/* DQRR_DCAP::PK */
+		idx);				/* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (1 << 8) |		/* DQRR_DCAP::S */
+		((u32)bitmask << 16));		/* DQRR_DCAP::DCAP_CI */
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	qm_cl_invalidate(DQRR_CI);
+	qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+	return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+	__maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* S */
+		(1 << 6) |			/* PK */
+		(idx & (QM_DQRR_SIZE - 1)));	/* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+	qm_out(DQRR_DCAP, (0 << 8) |		/* S */
+		(1 << 6) |			/* PK */
+		DQRR_PTR2IDX(dqrr->cursor));	/* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+	qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+	qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_VDQCR);
+}
+
+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
+{
+	qm_out(DQRR_PDQCR, pdqcr);
+}
+
+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
+{
+	return qm_in(DQRR_PDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_dqrr *dqrr = &portal->dqrr;
+
+	return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+	return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+/* --- MR API --- */
+
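+/* As with DQRR, MR entries are 64 bytes and the ring is size-aligned, so the
+ * carry-clear mask wraps an incremented entry pointer back to the ring base. */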
+#define MR_CARRYCLEAR(p) \
+	(void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+	return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+	return MR_CARRYCLEAR(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+		enum qm_mr_cmode cmode)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u32 cfg;
+
+	mr->ring = portal->addr.addr_ce + QM_CL_MR;
+	mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+	mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+	mr->cursor = mr->ring + mr->ci;
+	mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+	mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+	mr->ithresh = qm_in(MR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mr->pmode = pmode;
+	mr->cmode = cmode;
+#endif
+	cfg = (qm_in(CFG) & 0xfffff0ff) |
+		((cmode & 1) << 8);		/* QCSP_CFG:MM */
+	qm_out(CFG, cfg);
+	return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	if (mr->ci != MR_PTR2IDX(mr->cursor))
+		pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	if (!mr->fill)
+		return NULL;
+	return mr->cursor;
+}
+
+static inline u8 qm_mr_cursor(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return MR_PTR2IDX(mr->cursor);
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->fill);
+	mr->cursor = MR_INC(mr->cursor);
+	return --mr->fill;
+}
+
+static inline u8 qm_mr_pci_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u8 diff, old_pi = mr->pi;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pci);
+	mr->pi = qm_in(MR_PI_CINH);
+	diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+	mr->fill += diff;
+	return diff;
+}
+
+static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pce);
+	qm_cl_invalidate(MR_PI);
+	qm_cl_touch_ro(MR_PI);
+}
+
+static inline u8 qm_mr_pce_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	u8 diff, old_pi = mr->pi;
+
+	DPA_ASSERT(mr->pmode == qm_mr_pce);
+	mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
+	diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+	mr->fill += diff;
+	return diff;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+	const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+	DPA_ASSERT(mr->pmode == qm_mr_pvb);
+	/* when accessing 'verb', use __raw_readb() to ensure that compiler
+	 * inlining doesn't try to optimise out "excess reads". */
+	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+		if (!mr->pi)
+			mr->vbit ^= QM_MR_VERB_VBIT;
+		mr->fill++;
+		res = MR_INC(res);
+	}
+	dcbit_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cci);
+	mr->ci = MR_PTR2IDX(mr->cursor);
+	qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	qm_cl_invalidate(MR_CI);
+	qm_cl_touch_rw(MR_CI);
+}
+
+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+	qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	DPA_ASSERT(mr->cmode == qm_mr_cce);
+	mr->ci = MR_PTR2IDX(mr->cursor);
+	qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline u8 qm_mr_get_ci(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return mr->ci;
+}
+
+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
+{
+	register struct qm_mr *mr = &portal->mr;
+
+	return mr->ithresh;
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+	qm_out(MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
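+/* Management commands are built in the CR register and launched by writing the
+ * verb byte (with the current valid bit). Completion is detected by polling the
+ * verb byte of one of the two response registers; 'rridx' tracks which of the
+ * two will carry the next result and 'vbit' the valid bit to use for the next
+ * command, both toggling after each completed command. */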
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+
+	mc->cr = portal->addr.addr_ce + QM_CL_CR;
+	mc->rr = portal->addr.addr_ce + QM_CL_RR0;
+	mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+			QM_MCC_VERB_VBIT) ?  0 : 1;
+	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (mc->state != qman_mc_idle)
+		pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_user;
+#endif
+	dcbz_64(mc->cr);
+	return mc->cr;
+}
+
+static inline void qm_mc_abort(struct qm_portal *portal)
+{
+	__maybe_unused register struct qm_mc *mc = &portal->mc;
+
+	DPA_ASSERT(mc->state == qman_mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+	register struct qm_mc *mc = &portal->mc;
+	struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == qman_mc_user);
+	lwsync();
+	mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+	dcbf(mc->cr);
+	dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+	register struct qm_mc *mc = &portal->mc;
+	struct qm_mc_result *rr = mc->rr + mc->rridx;
+
+	DPA_ASSERT(mc->state == qman_mc_hw);
+	/* The inactive response register's verb byte always returns zero until
+	 * its command is submitted and completed. This includes the valid-bit,
+	 * in case you were wondering... */
+	if (!__raw_readb(&rr->verb)) {
+		dcbit_ro(rr);
+		return NULL;
+	}
+	mc->rridx ^= 1;
+	mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	mc->state = qman_mc_idle;
+#endif
+	return rr;
+}
+
+/* --- Portal interrupt register API --- */
+
+static inline int qm_isr_init(__always_unused struct qm_portal *portal)
+{
+	return 0;
+}
+
+static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
+{
+}
+
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+	qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+	return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+					u32 val)
+{
+	__qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+}
diff --git a/drivers/soc/fsl/qbman/qman_api.c b/drivers/soc/fsl/qbman/qman_api.c
new file mode 100644
index 0000000..8e2a9b7
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_api.c
@@ -0,0 +1,2819 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman.h"
+
+/* Compilation constants */
+#define DQRR_MAXFILL	15
+#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
+#define IRQNAME		"QMan portal %d"
+#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+#define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */
+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
+ * positive, and rounding to the closest value if it's zero. NB, this macro
+ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
+ * that are compatible with this. NB, these arguments should not be expressions
+ * unless it is safe for them to be evaluated multiple times. Eg. do not pass
+ * in "some_value++" as a parameter to the macro! */
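+/* For example, ROUNDING(10, 4, -1) == 2, ROUNDING(10, 4, 1) == 3 and
+ * ROUNDING(10, 4, 0) == 3. */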
+#define ROUNDING(n, d, r) \
+	(((r) < 0) ? div64_u64((n), (d)) : \
+	(((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
+	div64_u64(((n) + ((d) / 2)), (d))))
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...). */
+#define FQLOCK(fq) \
+	do { \
+		struct qman_fq *__fq478 = (fq); \
+		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+			spin_lock(&__fq478->fqlock); \
+	} while (0)
+#define FQUNLOCK(fq) \
+	do { \
+		struct qman_fq *__fq478 = (fq); \
+		if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+			spin_unlock(&__fq478->fqlock); \
+	} while (0)
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+	set_bits(mask, &fq->flags);
+}
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+	clear_bits(mask, &fq->flags);
+}
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+	return fq->flags & mask;
+}
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+	return !(fq->flags & mask);
+}
+
+struct qman_portal {
+	struct qm_portal p;
+	unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
+	unsigned long irq_sources;
+	u32 use_eqcr_ci_stashing;
+	u32 slowpoll;	/* only used when interrupts are off */
+	struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spinlock_t sharing_lock; /* only used if is_shared */
+	int is_shared;
+	struct qman_portal *sharing_redirect;
+#endif
+	u32 sdqcr;
+	int dqrr_disable_ref;
+	/* A portal-specific handler for DCP ERNs. If this is NULL, the global
+	 * handler is called instead. */
+	qman_cb_dc_ern cb_dc_ern;
+	/* When the cpu-affine portal is activated, this is non-NULL */
+	const struct qm_portal_config *config;
+	/* This is needed for providing a non-NULL device to dma_map_***() */
+	struct platform_device *pdev;
+	struct dpa_rbtree retire_table;
+	char irqname[MAX_IRQNAME];
+	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+	struct qman_cgrs *cgrs;
+	/* linked-list of CSCN handlers. */
+	struct list_head cgr_cbs;
+	/* list lock */
+	spinlock_t cgr_lock;
+	/* track if memory was allocated by the driver */
+	u8 alloced;
+};
+
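+/* Portal operations are normally protected by disabling local interrupts; when
+ * a portal is shared between cpus, a sharing spinlock is taken as well (via the
+ * irqsave variant) so that the other cpu is also excluded. */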
+#ifdef FSL_DPA_PORTAL_SHARE
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+		else \
+			local_irq_save(irqflags); \
+	} while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+	do { \
+		if ((p)->is_shared) \
+			raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+						   irqflags); \
+		else \
+			local_irq_restore(irqflags); \
+	} while (0)
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler. */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+void *affine_portals[NR_CPUS];
+
+/* "raw" gets the cpu-local struct whether it's a redirect or not. */
+static inline struct qman_portal *get_raw_affine_portal(void)
+{
+	return &get_cpu_var(qman_affine_portal);
+}
+/* For ops that can redirect, this obtains the portal to use */
+#ifdef FSL_DPA_PORTAL_SHARE
+static inline struct qman_portal *get_affine_portal(void)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+
+	if (p->sharing_redirect)
+		return p->sharing_redirect;
+	return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+/* For every "get", there must be a "put" */
+static inline void put_affine_portal(void)
+{
+	put_cpu_var(qman_affine_portal);
+}
+/* Exception: poll functions assume the caller is cpu-affine and in no risk of
+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
+ * semantic - ie. to disable pre-emption. Some use-cases expect the execution
+ * context to remain as non-atomic during poll-triggered callbacks as it was
+ * when the poll API was first called (eg. NAPI), so we go out of our way in
+ * this case to not disable pre-emption. */
+static inline struct qman_portal *get_poll_portal(void)
+{
+	return this_cpu_ptr(&qman_affine_portal);
+}
+#define put_poll_portal()
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table. */
+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
+
+/* This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on. */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+	int ret = fqtree_push(&p->retire_table, fq);
+
+	if (ret)
+		pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+	return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+	fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+	return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+	num_entries++;
+	/* Allocate 1 more entry since the first entry is not used */
+	qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
+	if (!qman_fq_lookup_table)
+		return -ENOMEM;
+	qman_fq_lookup_table_size = num_entries;
+	pr_info("Allocated lookup table at %p, entry count %lu\n",
+		qman_fq_lookup_table, (unsigned long)qman_fq_lookup_table_size);
+	return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+	u32 i;
+
+	spin_lock(&fq_hash_table_lock);
+	/* Can't use index zero because it has a special meaning
+	 * in the context_b field. */
+	for (i = 1; i < qman_fq_lookup_table_size; i++) {
+		if (qman_fq_lookup_table[i] == NULL) {
+			*entry = i;
+			qman_fq_lookup_table[i] = fq;
+			spin_unlock(&fq_hash_table_lock);
+			return 0;
+		}
+	}
+	spin_unlock(&fq_hash_table_lock);
+	return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+	spin_lock(&fq_hash_table_lock);
+	BUG_ON(entry >= qman_fq_lookup_table_size);
+	qman_fq_lookup_table[entry] = NULL;
+	spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+	BUG_ON(entry >= qman_fq_lookup_table_size);
+	return qman_fq_lookup_table[entry];
+}
+#endif
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do. */
+#define SLOW_POLL_IDLE	 1000
+#define SLOW_POLL_BUSY	 10
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+	struct qman_portal *p = ptr;
+	/*
+	 * The CSCI source is cleared inside __poll_portal_slow(), because
+	 * it could race against a Query Congestion State command also given
+	 * as part of the handling of this interrupt source. We mustn't
+	 * clear it a second time in this top-level function.
+	 */
+	u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & ~QM_PIRQ_CSCI);
+	u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+	/* DQRR-handling if it's interrupt-driven */
+	if (is & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+	/* Handling of anything else that's interrupt-driven */
+	clear |= __poll_portal_slow(p, is);
+	qm_isr_status_clear(&p->p, clear);
+	return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues(). */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+	unsigned long irqflags __maybe_unused;
+	PORTAL_IRQ_LOCK(p, irqflags);
+	if (!(p->dqrr_disable_ref++))
+		qm_dqrr_set_maxfill(&p->p, 0);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+
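+/* Consume any FQRNI messages left in MR by a previous user of the portal;
+ * anything other than an FQRNI means the portal is not clean and is reported
+ * as an error. */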
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+	const struct qm_mr_entry *msg;
+loop:
+	msg = qm_mr_current(p);
+	if (!msg) {
+		/* if MR was full and h/w had other FQRNI entries to produce, we
+		 * need to allow it time to produce those entries once the
+		 * existing entries are consumed. A worst-case situation
+		 * (fully-loaded system) means h/w sequencers may have to do 3-4
+		 * other things before servicing the portal's MR pump, each of
+		 * which (if slow) may take ~50 qman cycles (which is ~200
+		 * processor cycles). So rounding up and then multiplying this
+		 * worst-case estimate by a factor of 10, just to be
+		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+		 * one entry at a time, so h/w has an opportunity to produce new
+		 * entries well before the ring has been fully consumed, so
+		 * we're being *really* paranoid here. */
+		u64 now, then = mfatb();
+
+		do {
+			now = mfatb();
+		} while ((then + 10000) > now);
+		msg = qm_mr_current(p);
+		if (!msg)
+			return 0;
+	}
+	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+		/* We aren't draining anything but FQRNIs */
+		pr_err("Found verb 0x%x in MR\n", msg->verb);
+		return -1;
+	}
+	qm_mr_next(p);
+	qm_mr_cci_consume(p, 1);
+	goto loop;
+}
+
+struct qman_portal *qman_create_portal(
+			struct qman_portal *portal,
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs)
+{
+	struct qm_portal *__p;
+	char buf[16];
+	int ret;
+	u32 isdr;
+
+	if (!portal) {
+		portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+		if (!portal)
+			return portal;
+		portal->alloced = 1;
+	} else
+		portal->alloced = 0;
+
+	__p = &portal->p;
+
+	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
+								1 : 0);
+
+	/* prep the low-level portal struct with the mapped addresses from the
+	 * config, everything that follows depends on it and "config" is more
+	 * for (de)reference... */
+	__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+	__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+	if (qm_eqcr_init(__p, qm_eqcr_pvb,
+			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+		pr_err("EQCR initialisation failed\n");
+		goto fail_eqcr;
+	}
+	if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
+			qm_dqrr_cdc, DQRR_MAXFILL)) {
+		pr_err("DQRR initialisation failed\n");
+		goto fail_dqrr;
+	}
+	if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
+		pr_err("MR initialisation failed\n");
+		goto fail_mr;
+	}
+	if (qm_mc_init(__p)) {
+		pr_err("MC initialisation failed\n");
+		goto fail_mc;
+	}
+	if (qm_isr_init(__p)) {
+		pr_err("ISR initialisation failed\n");
+		goto fail_isr;
+	}
+	/* static interrupt-gating controls */
+	qm_dqrr_set_ithresh(__p, QMAN_PIRQ_DQRR_ITHRESH);
+	qm_mr_set_ithresh(__p, QMAN_PIRQ_MR_ITHRESH);
+	qm_isr_set_iperiod(__p, QMAN_PIRQ_IPERIOD);
+	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+	if (!portal->cgrs)
+		goto fail_cgrs;
+	/* initial snapshot is no-depletion */
+	qman_cgrs_init(&portal->cgrs[1]);
+	if (cgrs)
+		portal->cgrs[0] = *cgrs;
+	else
+		/* if the given mask is NULL, assume all CGRs can be seen */
+		qman_cgrs_fill(&portal->cgrs[0]);
+	INIT_LIST_HEAD(&portal->cgr_cbs);
+	spin_lock_init(&portal->cgr_lock);
+	portal->bits = 0;
+	portal->slowpoll = 0;
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	portal->eqci_owned = NULL;
+#endif
+#ifdef FSL_DPA_PORTAL_SHARE
+	raw_spin_lock_init(&portal->sharing_lock);
+	portal->is_shared = config->public_cfg.is_shared;
+	portal->sharing_redirect = NULL;
+#endif
+	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+	portal->dqrr_disable_ref = 0;
+	portal->cb_dc_ern = NULL;
+	sprintf(buf, "qportal-%d", config->public_cfg.channel);
+	portal->pdev = platform_device_alloc(buf, -1);
+	if (!portal->pdev)
+		goto fail_devalloc;
+	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+		goto fail_devadd;
+	ret = platform_device_add(portal->pdev);
+	if (ret)
+		goto fail_devadd;
+	dpa_rbtree_init(&portal->retire_table);
+	isdr = 0xffffffff;
+	qm_isr_disable_write(__p, isdr);
+	portal->irq_sources = 0;
+	qm_isr_enable_write(__p, portal->irq_sources);
+	qm_isr_status_clear(__p, 0xffffffff);
+	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+				portal)) {
+		pr_err("request_irq() failed\n");
+		goto fail_irq;
+	}
+	if ((config->public_cfg.cpu != -1) &&
+			irq_can_set_affinity(config->public_cfg.irq) &&
+			irq_set_affinity(config->public_cfg.irq,
+				cpumask_of(config->public_cfg.cpu))) {
+		pr_err("irq_set_affinity() failed\n");
+		goto fail_affinity;
+	}
+
+	/* Need EQCR to be empty before continuing */
+	isdr ^= QM_PIRQ_EQCI;
+	qm_isr_disable_write(__p, isdr);
+	ret = qm_eqcr_get_fill(__p);
+	if (ret) {
+		pr_err("EQCR unclean\n");
+		goto fail_eqcr_empty;
+	}
+	isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
+	qm_isr_disable_write(__p, isdr);
+	if (qm_dqrr_current(__p) != NULL) {
+		pr_err("DQRR unclean\n");
+		qm_dqrr_cdc_consume_n(__p, 0xffff);
+	}
+	if (qm_mr_current(__p) != NULL) {
+		/* special handling, drain just in case it's a few FQRNIs */
+		if (drain_mr_fqrni(__p)) {
+			const struct qm_mr_entry *e = qm_mr_current(__p);
+
+			pr_err("MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
+			       e->verb, e->ern.rc, e->ern.fd.addr_lo);
+			goto fail_dqrr_mr_empty;
+		}
+	}
+	/* Success */
+	portal->config = config;
+	qm_isr_disable_write(__p, 0);
+	qm_isr_uninhibit(__p);
+	/* Write a sane SDQCR */
+	qm_dqrr_sdqcr_set(__p, portal->sdqcr);
+	return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+	free_irq(config->public_cfg.irq, portal);
+fail_irq:
+	platform_device_del(portal->pdev);
+fail_devadd:
+	platform_device_put(portal->pdev);
+fail_devalloc:
+	kfree(portal->cgrs);
+fail_cgrs:
+	qm_isr_finish(__p);
+fail_isr:
+	qm_mc_finish(__p);
+fail_mc:
+	qm_mr_finish(__p);
+fail_mr:
+	qm_dqrr_finish(__p);
+fail_dqrr:
+	qm_eqcr_finish(__p);
+fail_eqcr:
+	return NULL;
+}
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs)
+{
+	struct qman_portal *res;
+	struct qman_portal *portal;
+
+	portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
+	res = qman_create_portal(portal, config, cgrs);
+	if (res) {
+		spin_lock(&affine_mask_lock);
+		cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+		affine_channels[config->public_cfg.cpu] =
+			config->public_cfg.channel;
+		affine_portals[config->public_cfg.cpu] = portal;
+		spin_unlock(&affine_mask_lock);
+	}
+	return res;
+}
+
+/* These checks are BUG_ON()s because the driver is already supposed to avoid
+ * these cases. */
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+								int cpu)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+	struct qman_portal *p = &per_cpu(qman_affine_portal, cpu);
+
+	/* Check that we don't already have our own portal */
+	BUG_ON(p->config);
+	/* Check that we aren't already slaving to another portal */
+	BUG_ON(p->is_shared);
+	/* Check that 'redirect' is prepared to have us */
+	BUG_ON(!redirect->config->public_cfg.is_shared);
+	/* These are the only elements to initialise when redirecting */
+	p->irq_sources = 0;
+	p->sharing_redirect = redirect;
+	affine_portals[cpu] = p;
+	return p;
+#else
+	BUG();
+	return NULL;
+#endif
+}
+
+void qman_destroy_portal(struct qman_portal *qm)
+{
+	const struct qm_portal_config *pcfg;
+
+	/* Stop dequeues on the portal */
+	qm_dqrr_sdqcr_set(&qm->p, 0);
+
+	/* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+	 * something related to QM_PIRQ_EQCI, this may need fixing.
+	 * Also, due to the prefetching model used for CI updates in the enqueue
+	 * path, this update will only invalidate the CI cacheline *after*
+	 * working on it, so we need to call this twice to ensure a full update
+	 * irrespective of where the enqueue processing was at when the teardown
+	 * began. */
+	qm_eqcr_cce_update(&qm->p);
+	qm_eqcr_cce_update(&qm->p);
+	pcfg = qm->config;
+
+	free_irq(pcfg->public_cfg.irq, qm);
+
+	kfree(qm->cgrs);
+	qm_isr_finish(&qm->p);
+	qm_mc_finish(&qm->p);
+	qm_mr_finish(&qm->p);
+	qm_dqrr_finish(&qm->p);
+	qm_eqcr_finish(&qm->p);
+
+	platform_device_del(qm->pdev);
+	platform_device_put(qm->pdev);
+
+	qm->config = NULL;
+	if (qm->alloced)
+		kfree(qm);
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+	/* We don't want to redirect if we're a slave, use "raw" */
+	struct qman_portal *qm = get_raw_affine_portal();
+	const struct qm_portal_config *pcfg;
+	int cpu;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (qm->sharing_redirect) {
+		qm->sharing_redirect = NULL;
+		put_affine_portal();
+		return NULL;
+	}
+	qm->is_shared = 0;
+#endif
+	pcfg = qm->config;
+	cpu = pcfg->public_cfg.cpu;
+
+	qman_destroy_portal(qm);
+
+	spin_lock(&affine_mask_lock);
+	cpumask_clear_cpu(cpu, &affine_mask);
+	spin_unlock(&affine_mask_lock);
+	put_affine_portal();
+	return pcfg;
+}
+
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
+{
+	return &p->config->public_cfg;
+}
+EXPORT_SYMBOL(qman_p_get_portal_config);
+
+const struct qman_portal_config *qman_get_portal_config(void)
+{
+	struct qman_portal *p = get_affine_portal();
+	const struct qman_portal_config *ret = qman_p_get_portal_config(p);
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_get_portal_config);
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_mr_entry *msg, u8 verb)
+{
+	FQLOCK(fq);
+	switch (verb) {
+	case QM_MR_VERB_FQRL:
+		DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+		fq_clear(fq, QMAN_FQ_STATE_ORL);
+		table_del_fq(p, fq);
+		break;
+	case QM_MR_VERB_FQRN:
+		DPA_ASSERT((fq->state == qman_fq_state_parked) ||
+			(fq->state == qman_fq_state_sched));
+		DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		else
+			table_del_fq(p, fq);
+		fq->state = qman_fq_state_retired;
+		break;
+	case QM_MR_VERB_FQPN:
+		DPA_ASSERT(fq->state == qman_fq_state_sched);
+		DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+		fq->state = qman_fq_state_parked;
+	}
+	FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+	const struct qm_mr_entry *msg;
+
+	if (is & QM_PIRQ_CSCI) {
+		struct qman_cgrs rr, c;
+		struct qm_mc_result *mcr;
+		struct qman_cgr *cgr;
+		unsigned long irqflags __maybe_unused;
+
+		spin_lock_irqsave(&p->cgr_lock, irqflags);
+		/*
+		 * The CSCI bit must be cleared _before_ issuing the
+		 * Query Congestion State command, to ensure that a long
+		 * CGR State Change callback cannot miss an intervening
+		 * state change.
+		 */
+		qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+		qm_mc_start(&p->p);
+		qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+		while (!(mcr = qm_mc_result(&p->p)))
+			cpu_relax();
+		/* mask out the ones I'm not interested in */
+		qman_cgrs_and(&rr, (const struct qman_cgrs *)
+			&mcr->querycongestion.state, &p->cgrs[0]);
+		/* check previous snapshot for delta, enter/exit congestion */
+		qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+		/* update snapshot */
+		qman_cgrs_cp(&p->cgrs[1], &rr);
+		/* Invoke callback */
+		list_for_each_entry(cgr, &p->cgr_cbs, node)
+			if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+				cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+		spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	}
+
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (is & QM_PIRQ_EQCI) {
+		unsigned long irqflags;
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		p->eqci_owned = NULL;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+#endif
+
+	if (is & QM_PIRQ_EQRI) {
+		unsigned long irqflags __maybe_unused;
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		qm_eqcr_cce_update(&p->p);
+		qm_eqcr_set_ithresh(&p->p, 0);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		wake_up(&affine_queue);
+	}
+
+	if (is & QM_PIRQ_MRI) {
+		struct qman_fq *fq;
+		u8 verb, num = 0;
+mr_loop:
+		qm_mr_pvb_update(&p->p);
+		msg = qm_mr_current(&p->p);
+		if (!msg)
+			goto mr_done;
+		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+		/* The message is a software ERN iff the 0x20 bit is clear */
+		if (verb & 0x20) {
+			switch (verb) {
+			case QM_MR_VERB_FQRNI:
+				/* nada, we drop FQRNIs on the floor */
+				break;
+			case QM_MR_VERB_FQRN:
+			case QM_MR_VERB_FQRL:
+				/* Lookup in the retirement table */
+				fq = table_find_fq(p, msg->fq.fqid);
+				BUG_ON(!fq);
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_FQPN:
+				/* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+				fq = get_fq_table_entry(msg->fq.contextB);
+#else
+				fq = (void *)(uintptr_t)msg->fq.contextB;
+#endif
+				fq_state_change(p, fq, msg, verb);
+				if (fq->cb.fqs)
+					fq->cb.fqs(p, fq, msg);
+				break;
+			case QM_MR_VERB_DC_ERN:
+				/* DCP ERN */
+				if (p->cb_dc_ern)
+					p->cb_dc_ern(p, msg);
+				else if (cb_dc_ern)
+					cb_dc_ern(p, msg);
+				else
+					pr_crit_once("Leaking DCP ERNs!\n");
+				break;
+			default:
+				pr_crit("Invalid MR verb 0x%02x\n", verb);
+			}
+		} else {
+			/* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+			fq = get_fq_table_entry(msg->ern.tag);
+#else
+			fq = (void *)(uintptr_t)msg->ern.tag;
+#endif
+			fq->cb.ern(p, fq, msg);
+		}
+		num++;
+		qm_mr_next(&p->p);
+		goto mr_loop;
+mr_done:
+		qm_mr_cci_consume(&p->p, num);
+	}
+	/*
+	 * QM_PIRQ_CSCI has already been cleared, as part of its specific
+	 * processing. If that interrupt source has meanwhile been re-asserted,
+	 * we mustn't clear it here (or in the top-level interrupt handler).
+	 */
+	return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined. */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+	p->vdqcr_owned = NULL;
+	FQLOCK(fq);
+	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+	FQUNLOCK(fq);
+	wake_up(&affine_queue);
+}
+
+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
+ * that would conflict with other things if they ran at the same time on the
+ * same cpu are:
+ *
+ *   (i) setting/clearing vdqcr_owned, and
+ *  (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe because:
+ *
+ *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ *	 vdqcr_owned field (which it does before setting VDQCR), and
+ *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
+ *	 done so that we can't interfere.
+ *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ *	 with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API *except* qman_poll() (as that's the
+ * sole API that could be invoking the callback through this function).
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+					unsigned int poll_limit)
+{
+	const struct qm_dqrr_entry *dq;
+	struct qman_fq *fq;
+	enum qman_cb_dqrr_result res;
+	unsigned int limit = 0;
+
+loop:
+	qm_dqrr_pvb_update(&p->p);
+	dq = qm_dqrr_current(&p->p);
+	if (!dq)
+		goto done;
+	if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+		/* VDQCR: don't trust contextB as the FQ may have been
+		 * configured for h/w consumption and we're draining it
+		 * post-retirement. */
+		fq = p->vdqcr_owned;
+		/* We only set QMAN_FQ_STATE_NE when retiring, so we only need
+		 * to check for clearing it when doing volatile dequeues. It's
+		 * one less thing to check in the critical path (SDQCR). */
+		if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+			fq_clear(fq, QMAN_FQ_STATE_NE);
+		/* this is duplicated from the SDQCR code, but we have stuff to
+		 * do before *and* after this callback, and we don't want
+		 * multiple if()s in the critical path (SDQCR). */
+		res = fq->cb.dqrr(p, fq, dq);
+		if (res == qman_cb_dqrr_stop)
+			goto done;
+		/* Check for VDQCR completion */
+		if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+			clear_vdqcr(p, fq);
+	} else {
+		/* SDQCR: contextB points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		fq = get_fq_table_entry(dq->contextB);
+#else
+		fq = (void *)(uintptr_t)dq->contextB;
+#endif
+		/* Now let the callback do its stuff */
+		res = fq->cb.dqrr(p, fq, dq);
+		/* The callback can request that we exit without consuming this
+		 * entry or advancing. */
+		if (res == qman_cb_dqrr_stop)
+			goto done;
+	}
+	/* Interpret 'dq' from a driver perspective. */
+	/* Parking isn't possible unless HELDACTIVE was set. NB,
+	 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+	 * check for HELDACTIVE to cover both. */
+	DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+		(res != qman_cb_dqrr_park));
+	/* Defer just means "skip it, I'll consume it myself later on" */
+	if (res != qman_cb_dqrr_defer)
+		qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
+	/* Move forward */
+	qm_dqrr_next(&p->p);
+	/* Entry processed and consumed, increment our counter. The callback can
+	 * request that we exit after consuming the entry, and we also exit if
+	 * we reach our processing limit, so loop back only if neither of these
+	 * conditions is met. */
+	if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
+		goto loop;
+done:
+	return limit;
+}
+
+u32 qman_irqsource_get(void)
+{
+	/* "irqsource" and "poll" APIs mustn't redirect when sharing, they
+	 * should shut the user out if they are not the primary CPU hosting the
+	 * portal. That's why we use the "raw" interface. */
+	struct qman_portal *p = get_raw_affine_portal();
+	u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_get);
+
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
+{
+	__maybe_unused unsigned long irqflags;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (p->sharing_redirect)
+		return -EINVAL;
+#endif
+	PORTAL_IRQ_LOCK(p, irqflags);
+	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+int qman_irqsource_add(u32 bits __maybe_unused)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+	int ret;
+
+	ret = qman_p_irqsource_add(p, bits);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_add);
+
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+	__maybe_unused unsigned long irqflags;
+	u32 ier;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (p->sharing_redirect) {
+		put_affine_portal();
+		return -EINVAL;
+	}
+#endif
+	/* Our interrupt handler only processes+clears status register bits that
+	 * are in p->irq_sources. As we're trimming that mask, if one of them
+	 * were to assert in the status register just before we remove it from
+	 * the enable register, there would be an interrupt-storm when we
+	 * release the IRQ lock. So we wait for the enable register update to
+	 * take effect in h/w (by reading it back) and then clear all other bits
+	 * in the status register. Ie. we clear them from ISR once it's certain
+	 * IER won't allow them to reassert. */
+	PORTAL_IRQ_LOCK(p, irqflags);
+	bits &= QM_PIRQ_VISIBLE;
+	clear_bits(bits, &p->irq_sources);
+	qm_isr_enable_write(&p->p, p->irq_sources);
+	ier = qm_isr_enable_read(&p->p);
+	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+	 * data-dependency, ie. to protect against re-ordering. */
+	qm_isr_status_clear(&p->p, ~ier);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+int qman_irqsource_remove(u32 bits)
+{
+	struct qman_portal *p = get_raw_affine_portal();
+	int ret;
+
+	ret = qman_p_irqsource_remove(p, bits);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+	return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+	if (cpu < 0) {
+		struct qman_portal *portal = get_raw_affine_portal();
+
+#ifdef FSL_DPA_PORTAL_SHARE
+		BUG_ON(portal->sharing_redirect);
+#endif
+		cpu = portal->config->public_cfg.cpu;
+		put_affine_portal();
+	}
+	BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
+	return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+void *qman_get_affine_portal(int cpu)
+{
+	return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+	int ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (unlikely(p->sharing_redirect))
+		ret = -EINVAL;
+	else
+#endif
+	{
+		BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
+		ret = __poll_portal_fast(p, limit);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+int qman_poll_dqrr(unsigned int limit)
+{
+	struct qman_portal *p = get_poll_portal();
+	int ret;
+
+	ret = qman_p_poll_dqrr(p, limit);
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_poll_dqrr);
+
+u32 qman_p_poll_slow(struct qman_portal *p)
+{
+	u32 ret;
+
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (unlikely(p->sharing_redirect))
+		ret = (u32)-1;
+	else
+#endif
+	{
+		u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+
+		ret = __poll_portal_slow(p, is);
+		qm_isr_status_clear(&p->p, ret);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_slow);
+
+u32 qman_poll_slow(void)
+{
+	struct qman_portal *p = get_poll_portal();
+	u32 ret;
+
+	ret = qman_p_poll_slow(p);
+	put_poll_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_poll_slow);
+
+/* Legacy wrapper */
+void qman_p_poll(struct qman_portal *p)
+{
+#ifdef FSL_DPA_PORTAL_SHARE
+	if (unlikely(p->sharing_redirect))
+		return;
+#endif
+	if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+		if (!(p->slowpoll--)) {
+			u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+			u32 active = __poll_portal_slow(p, is);
+
+			if (active) {
+				qm_isr_status_clear(&p->p, active);
+				p->slowpoll = SLOW_POLL_BUSY;
+			} else
+				p->slowpoll = SLOW_POLL_IDLE;
+		}
+	}
+	if ((~p->irq_sources) & QM_PIRQ_DQRI)
+		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+}
+EXPORT_SYMBOL(qman_p_poll);
+
+void qman_poll(void)
+{
+	struct qman_portal *p = get_poll_portal();
+
+	qman_p_poll(p);
+	put_poll_portal();
+}
+EXPORT_SYMBOL(qman_poll);
+
+void qman_p_stop_dequeues(struct qman_portal *p)
+{
+	qman_stop_dequeues_ex(p);
+}
+EXPORT_SYMBOL(qman_p_stop_dequeues);
+
+void qman_stop_dequeues(void)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_stop_dequeues(p);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_stop_dequeues);
+
+void qman_p_start_dequeues(struct qman_portal *p)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	DPA_ASSERT(p->dqrr_disable_ref > 0);
+	if (!(--p->dqrr_disable_ref))
+		qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_start_dequeues);
+
+void qman_start_dequeues(void)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_start_dequeues(p);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_start_dequeues);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	pools &= p->config->public_cfg.pools;
+	p->sdqcr |= pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+void qman_static_dequeue_add(u32 pools)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_static_dequeue_add(p, pools);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_add);
+
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
+{
+	unsigned long irqflags __maybe_unused;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	pools &= p->config->public_cfg.pools;
+	p->sdqcr &= ~pools;
+	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_del);
+
+void qman_static_dequeue_del(u32 pools)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_static_dequeue_del(p, pools);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_del);
+
+u32 qman_p_static_dequeue_get(struct qman_portal *p)
+{
+	return p->sdqcr;
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_get);
+
+u32 qman_static_dequeue_get(void)
+{
+	struct qman_portal *p = get_affine_portal();
+	u32 ret = qman_p_static_dequeue_get(p);
+
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_static_dequeue_get);
+
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+						int park_request)
+{
+	qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+EXPORT_SYMBOL(qman_p_dca);
+
+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+{
+	struct qman_portal *p = get_affine_portal();
+
+	qman_p_dca(p, dq, park_request);
+	put_affine_portal();
+}
+EXPORT_SYMBOL(qman_dca);
+
+/*******************/
+/* Frame queue API */
+/*******************/
+
+static const char *mcr_result_str(u8 result)
+{
+	switch (result) {
+	case QM_MCR_RESULT_NULL:
+		return "QM_MCR_RESULT_NULL";
+	case QM_MCR_RESULT_OK:
+		return "QM_MCR_RESULT_OK";
+	case QM_MCR_RESULT_ERR_FQID:
+		return "QM_MCR_RESULT_ERR_FQID";
+	case QM_MCR_RESULT_ERR_FQSTATE:
+		return "QM_MCR_RESULT_ERR_FQSTATE";
+	case QM_MCR_RESULT_ERR_NOTEMPTY:
+		return "QM_MCR_RESULT_ERR_NOTEMPTY";
+	case QM_MCR_RESULT_PENDING:
+		return "QM_MCR_RESULT_PENDING";
+	case QM_MCR_RESULT_ERR_BADCOMMAND:
+		return "QM_MCR_RESULT_ERR_BADCOMMAND";
+	}
+	return "<unknown MCR result>";
+}
+
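+/* Creates an FQ object. With QMAN_FQ_FLAG_DYNAMIC_FQID an FQID is allocated
+ * first. Unless QMAN_FQ_FLAG_AS_IS is set (and NO_MODIFY is not), the object
+ * simply starts out OOS; otherwise QUERYFQ and QUERYFQ_NP are issued and the
+ * object state is stitched together from the hardware state. */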
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+	struct qm_fqd fqd;
+	struct qm_mcr_queryfq_np np;
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+		int ret = qman_alloc_fqid(&fqid);
+
+		if (ret)
+			return ret;
+	}
+	spin_lock_init(&fq->fqlock);
+	fq->fqid = fqid;
+	fq->flags = flags;
+	fq->state = qman_fq_state_oos;
+	fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
+		if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+			qman_release_fqid(fqid);
+		return -ENOMEM;
+	}
+#endif
+	if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+		return 0;
+	/* Everything else is AS_IS support */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+		goto err;
+	}
+	fqd = mcr->queryfq.fqd;
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+	if (mcr->result != QM_MCR_RESULT_OK) {
+		pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+		goto err;
+	}
+	np = mcr->queryfq_np;
+	/* Phew, have queryfq and queryfq_np results, stitch together
+	 * the FQ object from those. */
+	fq->cgr_groupid = fqd.cgid;
+	switch (np.state & QM_MCR_NP_STATE_MASK) {
+	case QM_MCR_NP_STATE_OOS:
+		break;
+	case QM_MCR_NP_STATE_RETIRED:
+		fq->state = qman_fq_state_retired;
+		if (np.frm_cnt)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		break;
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+		fq->state = qman_fq_state_sched;
+		if (np.state & QM_MCR_NP_STATE_R)
+			fq_set(fq, QMAN_FQ_STATE_CHANGING);
+		break;
+	case QM_MCR_NP_STATE_PARKED:
+		fq->state = qman_fq_state_parked;
+		break;
+	default:
+		DPA_ASSERT(NULL == "invalid FQ state");
+	}
+	if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+		fq->state |= QMAN_FQ_STATE_CGR_EN;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+err:
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+		qman_release_fqid(fqid);
+	return -EIO;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+
+	/* We don't need to lock the FQ as it is a pre-condition that the FQ be
+	 * quiesced. Instead, run some checks. */
+	switch (fq->state) {
+	case qman_fq_state_parked:
+		DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
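+		/* fall through */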
+	case qman_fq_state_oos:
+		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+			qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		clear_fq_table_entry(fq->key);
+#endif
+		return;
+	default:
+		break;
+	}
+	DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+	return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+	if (state)
+		*state = fq->state;
+	if (flags)
+		*flags = fq->flags;
+}
+EXPORT_SYMBOL(qman_fq_state);
+
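+/* Initialises an FQ via an INITFQ_PARKED or INITFQ_SCHED management command.
+ * For FQs that are not TO_DCPORTAL, context_b is overwritten with the demux
+ * tag; if the caller did not set CONTEXTA the stashing settings are cleared,
+ * otherwise the FQ object is DMA-mapped for context stashing. */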
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+		QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+	if ((fq->state != qman_fq_state_oos) &&
+			(fq->state != qman_fq_state_parked))
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+		/* OAC can't be set at the same time as TDTHRESH */
+		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+			return -EINVAL;
+	}
+	/* Issue an INITFQ_[PARKED|SCHED] management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			((fq->state != qman_fq_state_oos) &&
+				(fq->state != qman_fq_state_parked)))) {
+		FQUNLOCK(fq);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EBUSY;
+	}
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initfq = *opts;
+	mcc->initfq.fqid = fq->fqid;
+	mcc->initfq.count = 0;
+	/* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * demux pointer. Otherwise, the caller-provided value is allowed to
+	 * stand, don't overwrite it. */
+	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+		dma_addr_t phys_fq;
+
+		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+		mcc->initfq.fqd.context_b = fq->key;
+#else
+		mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+		/* and the physical address - NB, if the user wasn't trying to
+		 * set CONTEXTA, clear the stashing settings. */
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+			memset(&mcc->initfq.fqd.context_a, 0,
+				sizeof(mcc->initfq.fqd.context_a));
+		} else {
+			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+						DMA_TO_DEVICE);
+			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+		}
+	}
+	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+		mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
+		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+			mcc->initfq.fqd.dest.wq = 4;
+		}
+	}
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		FQUNLOCK(fq);
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EIO;
+	}
+	if (opts) {
+		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+			else
+				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+		}
+		if (opts->we_mask & QM_INITFQ_WE_CGID)
+			fq->cgr_groupid = opts->fqd.cgid;
+	}
+	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+			qman_fq_state_sched : qman_fq_state_parked;
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return 0;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+
+	if (fq->state != qman_fq_state_parked)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	/* Issue an ALTERFQ_SCHED management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state != qman_fq_state_parked))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_sched;
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
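+/* Retires an FQ. Returns 0 if retirement completed immediately, 1 if it is
+ * pending (QMAN_FQ_STATE_CHANGING is set and an FQRN will follow), or a
+ * negative errno on failure. */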
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int rval;
+	u8 res;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_sched))
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state == qman_fq_state_retired) ||
+				(fq->state == qman_fq_state_oos))) {
+		rval = -EBUSY;
+		goto out;
+	}
+	rval = table_push_fq(p, fq);
+	if (rval)
+		goto out;
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+	res = mcr->result;
+	/* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
+	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
+	 * friendly, otherwise the caller doesn't necessarily have a fully
+	 * "retired" FQ on return even if the retirement was immediate. However
+	 * this does mean some code duplication between here and
+	 * fq_state_change(). */
+	if (likely(res == QM_MCR_RESULT_OK)) {
+		rval = 0;
+		/* Process 'fq' right away, we'll ignore FQRNI */
+		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+			fq_set(fq, QMAN_FQ_STATE_NE);
+		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+			fq_set(fq, QMAN_FQ_STATE_ORL);
+		else
+			table_del_fq(p, fq);
+		if (flags)
+			*flags = fq->flags;
+		fq->state = qman_fq_state_retired;
+		if (fq->cb.fqs) {
+			/* Another issue with supporting "immediate" retirement
+			 * is that we're forced to drop FQRNIs, because by the
+			 * time they're seen it may already be "too late" (the
+			 * fq may have been OOS'd and free()'d already). But if
+			 * the upper layer wants a callback whether it's
+			 * immediate or not, we have to fake an "MR" entry to
+			 * look like an FQRNI... */
+			struct qm_mr_entry msg;
+
+			msg.verb = QM_MR_VERB_FQRNI;
+			msg.fq.fqs = mcr->alterfq.fqs;
+			msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+			msg.fq.contextB = fq->key;
+#else
+			msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+			fq->cb.fqs(p, fq, &msg);
+		}
+	} else if (res == QM_MCR_RESULT_PENDING) {
+		rval = 1;
+		fq_set(fq, QMAN_FQ_STATE_CHANGING);
+	} else {
+		rval = -EIO;
+		table_del_fq(p, fq);
+	}
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return rval;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+
+	if (fq->state != qman_fq_state_retired)
+		return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+			(fq->state != qman_fq_state_retired))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+	fq->state = qman_fq_state_oos;
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret = 0;
+	u8 res;
+	u8 myverb;
+
+	if ((fq->state == qman_fq_state_oos) ||
+		(fq->state == qman_fq_state_retired) ||
+		(fq->state == qman_fq_state_parked))
+		return -EINVAL;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+	if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+		return -EINVAL;
+#endif
+	/* Issue an ALTER_FQXON or ALTER_FQXOFF management command */
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	FQLOCK(fq);
+	if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+			(fq->state == qman_fq_state_parked) ||
+			(fq->state == qman_fq_state_oos) ||
+			(fq->state == qman_fq_state_retired))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	mcc = qm_mc_start(&p->p);
+	mcc->alterfq.fqid = fq->fqid;
+	mcc->alterfq.count = 0;
+	myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+	res = mcr->result;
+	if (res != QM_MCR_RESULT_OK) {
+		ret = -EIO;
+		goto out;
+	}
+out:
+	FQUNLOCK(fq);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_fq_flow_control);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*fqd = mcr->queryfq.fqd;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK)
+		return -EIO;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_fq);
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->queryfq.fqid = fq->fqid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*np = mcr->queryfq_np;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res == QM_MCR_RESULT_ERR_FQID)
+		return -ERANGE;
+	else if (res != QM_MCR_RESULT_OK)
+		return -EIO;
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res, myverb;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+				 QM_MCR_VERB_QUERYWQ;
+	mcc = qm_mc_start(&p->p);
+	mcc->querywq.channel.id = wq->channel.id;
+	qm_mc_commit(&p->p, myverb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*wq = mcr->querywq;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_wq);
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	mcc->querycgr.cgid = cgr->cgrid;
+	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+	res = mcr->result;
+	if (res == QM_MCR_RESULT_OK)
+		*cgrd = mcr->querycgr;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	if (res != QM_MCR_RESULT_OK) {
+		pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+	unsigned long irqflags __maybe_unused;
+	int ret = -EBUSY;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	if (!p->vdqcr_owned) {
+		FQLOCK(fq);
+		if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+			goto escape;
+		fq_set(fq, QMAN_FQ_STATE_VDQCR);
+		FQUNLOCK(fq);
+		p->vdqcr_owned = fq;
+		ret = 0;
+	}
+escape:
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	if (!ret)
+		qm_dqrr_vdqcr_set(&p->p, vdqcr);
+	return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+	int ret;
+
+	*p = get_affine_portal();
+	ret = set_p_vdqcr(*p, fq, vdqcr);
+	put_affine_portal();
+	return ret;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!(ret = set_p_vdqcr(p, fq, vdqcr)));
+	else
+		wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
+	return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+				u32 vdqcr, u32 flags)
+{
+	int ret = 0;
+
+	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+		ret = wait_event_interruptible(affine_queue,
+				!(ret = set_vdqcr(p, fq, vdqcr)));
+	else
+		wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
+	return ret;
+}
+#endif
+
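+/* Issues a volatile dequeue command (VDQCR) for a parked or retired FQ. The
+ * FQID field of the caller-supplied vdqcr must be zero; it is filled in from
+ * the FQ object here. */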
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+					u32 flags __maybe_unused, u32 vdqcr)
+{
+	int ret;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_retired))
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
+	else
+#endif
+		ret = set_p_vdqcr(p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/* NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue. */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_volatile_dequeue);
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+				u32 vdqcr)
+{
+	struct qman_portal *p;
+	int ret;
+
+	if ((fq->state != qman_fq_state_parked) &&
+			(fq->state != qman_fq_state_retired))
+		return -EINVAL;
+	if (vdqcr & QM_VDQCR_FQID_MASK)
+		return -EINVAL;
+	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+		return -EBUSY;
+	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_WAIT)
+		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+	else
+#endif
+		ret = set_vdqcr(&p, fq, vdqcr);
+	if (ret)
+		return ret;
+	/* VDQCR is set */
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+			/* NB: don't propagate any error - the caller wouldn't
+			 * know whether the VDQCR was issued or not. A signal
+			 * could arrive after returning anyway, so the caller
+			 * can check signal_pending() if that's an issue. */
+			wait_event_interruptible(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+		else
+			wait_event(affine_queue,
+				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+	if (avail)
+		qm_eqcr_cce_prefetch(&p->p);
+	else
+		qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qman_portal *p = get_affine_portal();
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	update_eqcr_ci(p, 0);
+	avail = qm_eqcr_get_fill(&p->p);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return avail == 0;
+}
+EXPORT_SYMBOL(qman_eqcr_is_empty);
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+	if (affine) {
+		unsigned long irqflags __maybe_unused;
+		struct qman_portal *p = get_affine_portal();
+
+		PORTAL_IRQ_LOCK(p, irqflags);
+		p->cb_dc_ern = handler;
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+	} else
+		cb_dc_ern = handler;
+}
+EXPORT_SYMBOL(qman_set_dc_ern);
+
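+/* Tries to claim an EQCR entry on the given portal. On success the entry is
+ * returned with the portal lock still held (the caller commits and unlocks);
+ * on failure the lock is dropped and NULL is returned. */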
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+	u8 avail;
+
+	PORTAL_IRQ_LOCK(p, (*irqflags));
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (p->eqci_owned) {
+			PORTAL_IRQ_UNLOCK(p, (*irqflags));
+			return NULL;
+		}
+		p->eqci_owned = fq;
+	}
+#endif
+	if (p->use_eqcr_ci_stashing) {
+		/*
+		 * The stashing case is easy, only update if we need to in
+		 * order to try and liberate ring entries.
+		 */
+		eq = qm_eqcr_start_stash(&p->p);
+	} else {
+		/*
+		 * The non-stashing case is harder, need to prefetch ahead of
+		 * time.
+		 */
+		avail = qm_eqcr_get_avail(&p->p);
+		if (avail < 2)
+			update_eqcr_ci(p, avail);
+		eq = qm_eqcr_start_no_stash(&p->p);
+	}
+
+	if (unlikely(!eq)) {
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+		if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+				(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
+			p->eqci_owned = NULL;
+#endif
+		PORTAL_IRQ_UNLOCK(p, (*irqflags));
+		return NULL;
+	}
+	if (flags & QMAN_ENQUEUE_FLAG_DCA)
+		eq->dca = QM_EQCR_DCA_ENABLE |
+			((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+					QM_EQCR_DCA_PARK : 0) |
+			((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+	eq->fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	eq->tag = fq->key;
+#else
+	eq->tag = (u32)(uintptr_t)fq;
+#endif
+	eq->fd = *fd;
+	return eq;
+}
+
+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+	*p = get_affine_portal();
+	eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
+	if (!eq)
+		put_affine_portal();
+	return eq;
+}
+
+#ifdef FSL_DPA_CAN_WAIT
+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
+
+	if (!eq)
+		qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
+	return eq;
+}
+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+	else
+		wait_event(affine_queue,
+			(eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+	return eq;
+}
+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
+
+	if (!eq)
+		qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
+	return eq;
+}
+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
+					unsigned long *irqflags __maybe_unused,
+					struct qman_fq *fq,
+					const struct qm_fd *fd,
+					u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+		wait_event_interruptible(affine_queue,
+			(eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+	else
+		wait_event(affine_queue,
+			(eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+	return eq;
+}
+#endif
+
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue);
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				struct qman_fq *orp, u16 orp_seqnum)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Process ORP-specifics here */
+	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+	else {
+		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+		if (flags & QMAN_ENQUEUE_FLAG_NESN)
+			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+		else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+	}
+	eq->seqnum = orp_seqnum;
+	eq->orp = orp->fqid;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_orp);
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+			struct qman_fq *orp, u16 orp_seqnum)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Process ORP-specifics here */
+	if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+		orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+	else {
+		orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+		if (flags & QMAN_ENQUEUE_FLAG_NESN)
+			orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+		else
+			/* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+			orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+	}
+	eq->seqnum = orp_seqnum;
+	eq->orp = orp->fqid;
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+		((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+				0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_orp);
+
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				qman_cb_precommit cb, void *cb_arg)
+{
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Invoke the user-supplied callback before writing the commit verb */
+	if (cb(cb_arg)) {
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		return -EINVAL;
+	}
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_precommit);
+
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+		u32 flags, qman_cb_precommit cb, void *cb_arg)
+{
+	struct qman_portal *p;
+	struct qm_eqcr_entry *eq;
+	unsigned long irqflags __maybe_unused;
+
+#ifdef FSL_DPA_CAN_WAIT
+	if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+		eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+	else
+#endif
+	eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+	if (!eq)
+		return -EBUSY;
+	/* Invoke the user-supplied callback before writing the commit verb */
+	if (cb(cb_arg)) {
+		PORTAL_IRQ_UNLOCK(p, irqflags);
+		put_affine_portal();
+		return -EINVAL;
+	}
+	/* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+		(flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+	/* Factor the below out, it's used from qman_enqueue_orp() too */
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+	if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+			(flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+		if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+			wait_event_interruptible(affine_queue,
+					(p->eqci_owned != fq));
+		else
+			wait_event(affine_queue, (p->eqci_owned != fq));
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_precommit);
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts)
+{
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	struct qman_portal *p = get_affine_portal();
+	unsigned long irqflags __maybe_unused;
+	u8 res;
+	u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+	PORTAL_IRQ_LOCK(p, irqflags);
+	mcc = qm_mc_start(&p->p);
+	if (opts)
+		mcc->initcgr = *opts;
+	mcc->initcgr.cgid = cgr->cgrid;
+	if (flags & QMAN_CGR_FLAG_USE_INIT)
+		verb = QM_MCC_VERB_INITCGR;
+	qm_mc_commit(&p->p, verb);
+	while (!(mcr = qm_mc_result(&p->p)))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+	res = mcr->result;
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(qman_modify_cgr);
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
+					QM_CHANNEL_SWPORTAL0))
+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
+
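+/* Registers a CGR on the affine portal's callback list and, if opts are
+ * supplied, updates the hardware CSCN target to include this portal. */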
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret;
+	struct qman_portal *p;
+
+	/* We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC. */
+	if (cgr->cgrid >= __CGR_NUM)
+		return -EINVAL;
+
+	p = get_affine_portal();
+
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	cgr->chan = p->config->public_cfg.channel;
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+
+	/* if no opts specified, just add it to the list */
+	if (!opts)
+		goto add_list;
+
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)
+		goto release_lock;
+	if (opts)
+		local_opts = *opts;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl =
+			QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+	else
+		/* Overwrite TARG */
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+							TARG_MASK(p);
+	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+	/* send init if flags indicate so */
+	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+	else
+		ret = qman_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		goto release_lock;
+add_list:
+	list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if newly added object requires its callback to be called */
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret) {
+		/* we can't go back, so proceed and return success, but scream
+		 * and wail to the log file */
+		pr_crit("CGR HW state partially modified\n");
+		ret = 0;
+		goto release_lock;
+	}
+	if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+							cgr->cgrid))
+		cgr->cb(p, cgr, 1);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+					struct qm_mcc_initcgr *opts)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcc_initcgr local_opts;
+	int ret;
+
+	if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+		pr_warn("This QMan version does not support sending CSCN to a DCP portal\n");
+		return -EINVAL;
+	}
+	/* We have to check that the provided CGRID is within the limits of the
+	 * data-structures, for obvious reasons. However we'll let h/w take
+	 * care of determining whether it's within the limits of what exists on
+	 * the SoC.
+	 */
+	if (cgr->cgrid >= __CGR_NUM)
+		return -EINVAL;
+
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	if (opts)
+		local_opts = *opts;
+
+	local_opts.cgr.cscn_targ_upd_ctrl = QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+				QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+	local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+	/* send init if flags indicate so */
+	if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+							&local_opts);
+	else
+		ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+	return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
+
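+/* Removes a CGR from the affine portal's callback list; if no other callback
+ * user of the CGRID remains on this portal, the portal is also removed from
+ * the hardware CSCN target. */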
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+	unsigned long irqflags __maybe_unused;
+	struct qm_mcr_querycgr cgr_state;
+	struct qm_mcc_initcgr local_opts;
+	int ret = 0;
+	struct qman_cgr *i;
+	struct qman_portal *p = get_affine_portal();
+
+	if (cgr->chan != p->config->public_cfg.channel) {
+		pr_crit("Attempting to delete cgr from a different portal than it was created on: created 0x%x, delete 0x%x\n",
+			cgr->chan, p->config->public_cfg.channel);
+		ret = -EINVAL;
+		goto put_portal;
+	}
+	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	list_del(&cgr->node);
+	/*
+	 * If there are no other CGR objects for this CGRID in the list, update
+	 * CSCN_TARG accordingly
+	 */
+	list_for_each_entry(i, &p->cgr_cbs, node)
+		if ((i->cgrid == cgr->cgrid) && i->cb)
+			goto release_lock;
+	ret = qman_query_cgr(cgr, &cgr_state);
+	if (ret)  {
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+		goto release_lock;
+	}
+	/* Overwrite TARG */
+	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+	else
+		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+							 ~(TARG_MASK(p));
+	ret = qman_modify_cgr(cgr, 0, &local_opts);
+	if (ret)
+		/* add back to the list */
+		list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+	put_affine_portal();
+	return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+int qman_set_wpm(int wpm_enable)
+{
+	return qm_set_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_set_wpm);
+
+int qman_get_wpm(int *wpm_enable)
+{
+	return qm_get_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_get_wpm);
+
+
+/* Cleanup FQs */
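+/* Force the given FQID out-of-service: retire it, drain any remaining frames
+ * via SDQCR/VDQCR as required, wait for the ORL to empty, then ALTER_OOS. */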
+static int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
+				 u32 fqid)
+{
+
+	struct qm_mc_command *mcc;
+	struct qm_mc_result *mcr;
+	u8 state;
+	int orl_empty, fq_empty, i, drain = 0;
+	u32 result;
+	u32 channel, wq;
+
+	/* Determine the state of the FQID */
+	mcc = qm_mc_start(portal[0]);
+	mcc->queryfq_np.fqid = fqid;
+	qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
+	while (!(mcr = qm_mc_result(portal[0])))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+	if (state == QM_MCR_NP_STATE_OOS)
+		return 0; /* Already OOS, no need to do any more checks */
+
+	/* Query which channel the FQ is using */
+	mcc = qm_mc_start(portal[0]);
+	mcc->queryfq.fqid = fqid;
+	qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
+	while (!(mcr = qm_mc_result(portal[0])))
+		cpu_relax();
+	DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+	/* Need to store these since the MCR gets reused */
+	channel = mcr->queryfq.fqd.dest.channel;
+	wq = mcr->queryfq.fqd.dest.wq;
+
+	switch (state) {
+	case QM_MCR_NP_STATE_TEN_SCHED:
+	case QM_MCR_NP_STATE_TRU_SCHED:
+	case QM_MCR_NP_STATE_ACTIVE:
+	case QM_MCR_NP_STATE_PARKED:
+		orl_empty = 0;
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_RETIRE);
+		result = mcr->result; /* Make a copy as we reuse MCR below */
+
+		if (result == QM_MCR_RESULT_PENDING) {
+			/* Need to wait for the FQRN in the message ring, which
+			   will only occur once the FQ has been drained.  In
+			   order for the FQ to drain the portal needs to be set
+			   to dequeue from the channel the FQ is scheduled on */
+			const struct qm_mr_entry *msg;
+			const struct qm_dqrr_entry *dqrr = NULL;
+			int found_fqrn = 0;
+			u16 dequeue_wq = 0;
+
+			/* Flag that we need to drain FQ */
+			drain = 1;
+
+			if (channel >= qm_channel_pool1 &&
+			    channel < (qm_channel_pool1 + 15)) {
+				/* Pool channel, enable the bit in the portal */
+				dequeue_wq = (channel -
+					      qm_channel_pool1 + 1)<<4 | wq;
+			} else if (channel < qm_channel_pool1) {
+				/* Dedicated channel */
+				dequeue_wq = wq;
+			} else {
+				pr_info("Cannot recover FQ 0x%x, it is scheduled on channel 0x%x\n",
+					fqid, channel);
+				return -EBUSY;
+			}
+			/* Set the sdqcr to drain this channel */
+			if (channel < qm_channel_pool1)
+				for (i = 0; i < portal_count; i++)
+					qm_dqrr_sdqcr_set(portal[i],
+						  QM_SDQCR_TYPE_ACTIVE |
+						  QM_SDQCR_CHANNELS_DEDICATED);
+			else
+				for (i = 0; i < portal_count; i++)
+					qm_dqrr_sdqcr_set(
+						portal[i],
+						QM_SDQCR_TYPE_ACTIVE |
+						QM_SDQCR_CHANNELS_POOL_CONV
+						(channel));
+			while (!found_fqrn) {
+				/* Keep draining DQRR while checking the MR */
+				for (i = 0; i < portal_count; i++) {
+					qm_dqrr_pvb_update(portal[i]);
+					dqrr = qm_dqrr_current(portal[i]);
+					while (dqrr) {
+						qm_dqrr_cdc_consume_1ptr(
+							portal[i], dqrr, 0);
+						qm_dqrr_pvb_update(portal[i]);
+						qm_dqrr_next(portal[i]);
+						dqrr = qm_dqrr_current(
+							portal[i]);
+					}
+					/* Process message ring too */
+					qm_mr_pvb_update(portal[i]);
+					msg = qm_mr_current(portal[i]);
+					while (msg) {
+						if ((msg->verb &
+						     QM_MR_VERB_TYPE_MASK)
+						    == QM_MR_VERB_FQRN)
+							found_fqrn = 1;
+						qm_mr_next(portal[i]);
+						qm_mr_cci_consume_to_current(
+							portal[i]);
+						qm_mr_pvb_update(portal[i]);
+						msg = qm_mr_current(portal[i]);
+					}
+					cpu_relax();
+				}
+			}
+		}
+		if (result != QM_MCR_RESULT_OK &&
+		    result !=  QM_MCR_RESULT_PENDING) {
+			/* error */
+			pr_err("ALTER_RETIRE failed on FQ 0x%x, result=0x%x\n",
+			       fqid, result);
+			return -1;
+		}
+		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+			/* ORL had no entries, no need to wait until the
+			   ERNs come in */
+			orl_empty = 1;
+		}
+		/* Retirement succeeded, check to see if FQ needs
+		   to be drained */
+		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+			/* FQ is Not Empty, drain using volatile DQ commands */
+			fq_empty = 0;
+			do {
+				const struct qm_dqrr_entry *dqrr = NULL;
+				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+				qm_dqrr_vdqcr_set(portal[0], vdqcr);
+
+				/* Wait for a dequeue to occur */
+				while (dqrr == NULL) {
+					qm_dqrr_pvb_update(portal[0]);
+					dqrr = qm_dqrr_current(portal[0]);
+					if (!dqrr)
+						cpu_relax();
+				}
+				/* Process the dequeues, making sure to
+				   empty the ring completely */
+				while (dqrr) {
+					if (dqrr->fqid == fqid &&
+					    dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+						fq_empty = 1;
+					qm_dqrr_cdc_consume_1ptr(portal[0],
+								 dqrr, 0);
+					qm_dqrr_pvb_update(portal[0]);
+					qm_dqrr_next(portal[0]);
+					dqrr = qm_dqrr_current(portal[0]);
+				}
+			} while (fq_empty == 0);
+		}
+		for (i = 0; i < portal_count; i++)
+			qm_dqrr_sdqcr_set(portal[i], 0);
+
+		/* Wait for the ORL to have been completely drained */
+		while (orl_empty == 0) {
+			const struct qm_mr_entry *msg;
+
+			qm_mr_pvb_update(portal[0]);
+			msg = qm_mr_current(portal[0]);
+			while (msg) {
+				if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+				    QM_MR_VERB_FQRL)
+					orl_empty = 1;
+				qm_mr_next(portal[0]);
+				qm_mr_cci_consume_to_current(portal[0]);
+				qm_mr_pvb_update(portal[0]);
+				msg = qm_mr_current(portal[0]);
+			}
+			cpu_relax();
+		}
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result != QM_MCR_RESULT_OK) {
+			pr_err("OOS after drain failed on FQID 0x%x, result 0x%x\n",
+			       fqid, mcr->result);
+			return -1;
+		}
+		return 0;
+	case QM_MCR_NP_STATE_RETIRED:
+		/* Send OOS Command */
+		mcc = qm_mc_start(portal[0]);
+		mcc->alterfq.fqid = fqid;
+		qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+		while (!(mcr = qm_mc_result(portal[0])))
+			cpu_relax();
+		DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+			   QM_MCR_VERB_ALTER_OOS);
+		if (mcr->result) {
+			pr_err("OOS failed on FQID 0x%x\n", fqid);
+			return -1;
+		}
+		return 0;
+	case QM_MCR_NP_STATE_OOS:
+		/*  Done */
+		return 0;
+	}
+	return -1;
+}
+
+int qman_shutdown_fq(u32 fqid)
+{
+	struct qman_portal *p;
+	unsigned long irqflags __maybe_unused;
+	int ret;
+	struct qm_portal *low_p;
+
+	p = get_affine_portal();
+	PORTAL_IRQ_LOCK(p, irqflags);
+	low_p = &p->p;
+	ret = qm_shutdown_fq(&low_p, 1, fqid);
+	PORTAL_IRQ_UNLOCK(p, irqflags);
+	put_affine_portal();
+	return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal)
+{
+	return portal->sharing_redirect ? NULL : portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/qman_driver.c b/drivers/soc/fsl/qbman/qman_driver.c
new file mode 100644
index 0000000..728c4d0
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_driver.c
@@ -0,0 +1,83 @@
+/* Copyright 2013 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#include <linux/time.h>
+
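+/* On systems without CCSR access but with at least one available portal,
+ * wait for the control-plane to configure QMan by polling QUERYFQ_NP on
+ * FQID 1 until it stops failing with -ERANGE (or a timeout expires). */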
+static int __init early_qman_init(void)
+{
+	struct device_node *dn;
+	u32 is_portal_available;
+
+	qman_init();
+
+	is_portal_available = 0;
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (of_device_is_available(dn)) {
+			is_portal_available = 1;
+			break;
+		}
+	}
+
+	if (!qman_have_ccsr() && is_portal_available) {
+		struct qman_fq fq = {.fqid = 1};
+		struct qm_mcr_queryfq_np np;
+		int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
+		struct timespec nowts, diffts, startts = current_kernel_time();
+
+		/* Loop until querying the given FQID succeeds or we time out */
+		while (1) {
+			err = qman_query_fq_np(&fq, &np);
+			if (!err) {
+				/* success, control-plane has configured QMan */
+				break;
+			} else if (err != -ERANGE) {
+				pr_err("I/O error, continuing anyway\n");
+				break;
+			}
+			nowts = current_kernel_time();
+			diffts = timespec_sub(nowts, startts);
+			if (diffts.tv_sec > 0) {
+				if (!retry--) {
+					pr_err("Time out, control-plane dead?\n");
+					break;
+				}
+				pr_warn("Polling for the control-plane (%d)\n",
+					retry);
+			}
+		}
+	}
+
+	qman_resource_init();
+
+	return 0;
+}
+subsys_initcall(early_qman_init);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000..ad9e3ba
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,629 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW  1
+#define CONFIG_FSL_DPA_PIRQ_FAST  1
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available) */
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+EXPORT_SYMBOL(qm_channel_pme);
+u16 qm_channel_dce = QMAN_CHANNEL_DCE;
+EXPORT_SYMBOL(qm_channel_dce);
+u16 qman_portal_max;
+EXPORT_SYMBOL(qman_portal_max);
+
+/* For these variables, and the portal-initialisation logic, the
+ * comments in bman_driver.c apply here, so they won't be repeated. */
+static struct qman_portal *shared_portals[NR_CPUS];
+static int num_shared_portals;
+static int shared_portals_idx;
+static LIST_HEAD(unused_pcfgs);
+
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 pools_sdqcr;
+
+#define STR_ERR_NOPROP	    "No '%s' property in node %s\n"
+#define STR_ERR_CELL	    "'%s' is not a %d-cell range in node %s\n"
+#define STR_FQID_RANGE	    "fsl,fqid-range"
+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
+#define STR_CGRID_RANGE	     "fsl,cgrid-range"
+
+/* A "fsl,fqid-range" node; release the given range to the allocator */
+static __init int fsl_fqid_range_init(struct device_node *node)
+{
+	int ret;
+	const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
+
+	if (!range) {
+		pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_fqid_range(range[0], range[1]);
+	pr_info("FQID allocator includes range %d:%d\n",
+		range[0], range[1]);
+	return 0;
+}
+
+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
+{
+	int ret;
+	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+	if (!chanid) {
+		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	for (ret = 0; ret < chanid[1]; ret++)
+		pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(chanid[0] + ret);
+	return 0;
+}
+
+/* A "fsl,pool-channel-range" node; release the given range to the allocator */
+static __init int fsl_pool_channel_range_init(struct device_node *node)
+{
+	int ret;
+	const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+
+	if (!chanid) {
+		pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_pool_range(chanid[0], chanid[1]);
+	pr_info("Pool channel allocator includes range %d:%d\n",
+		chanid[0], chanid[1]);
+	return 0;
+}
+
+/* A "fsl,cgrid-range" node; release the given range to the allocator */
+static __init int fsl_cgrid_range_init(struct device_node *node)
+{
+	struct qman_cgr cgr;
+	int ret, errors = 0;
+	const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
+
+	if (!range) {
+		pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
+		return -EINVAL;
+	}
+	if (ret != 8) {
+		pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
+		return -EINVAL;
+	}
+	qman_seed_cgrid_range(range[0], range[1]);
+	pr_info("CGRID allocator includes range %d:%d\n",
+		range[0], range[1]);
+	for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
+		ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
+		if (ret)
+			errors++;
+	}
+	if (errors)
+		pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
+			errors, (errors > 1) ? "s" : "", range[0], range[1]);
+	return 0;
+}
+
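+/* Walk the available "fsl,qman-portal" nodes to determine the QMan IP
+ * revision and the maximum number of portals for this SoC. */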
+static void qman_get_ip_revision(struct device_node *dn)
+{
+	u16 ip_rev = 0;
+
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (!of_device_is_available(dn))
+			continue;
+		if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
+			pr_err("Rev1.0 on P4080 rev1 is not supported!\n");
+			BUG();
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
+			ip_rev = QMAN_REV11;
+			qman_portal_max = 10;
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
+			ip_rev = QMAN_REV12;
+			qman_portal_max = 10;
+		} else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
+			of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
+			ip_rev = QMAN_REV20;
+			qman_portal_max = 3;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.0.0")) {
+			ip_rev = QMAN_REV30;
+			qman_portal_max = 50;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.0.1")) {
+			ip_rev = QMAN_REV30;
+			qman_portal_max = 25;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.0")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 50;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.1")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 25;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.2")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 18;
+		} else if (of_device_is_compatible(dn,
+						"fsl,qman-portal-3.1.3")) {
+			ip_rev = QMAN_REV31;
+			qman_portal_max = 10;
+		} else {
+			pr_warn("Unknown version in portal node, default to rev1.1\n");
+			ip_rev = QMAN_REV11;
+			qman_portal_max = 10;
+		}
+
+		if (!qman_ip_rev) {
+			if (ip_rev) {
+				qman_ip_rev = ip_rev;
+			} else {
+				pr_warn("Unknown version, default to rev1.1\n");
+				qman_ip_rev = QMAN_REV11;
+			}
+		} else if (ip_rev && (qman_ip_rev != ip_rev))
+			pr_warn("Revision = 0x%04x, but portal '%s' has 0x%04x\n",
+				qman_ip_rev, dn->full_name, ip_rev);
+		if (qman_ip_rev == ip_rev)
+			break;
+	}
+}
+
+/* Parse a portal node, perform generic mapping duties and return the config. It
+ * is not known at this stage for what purpose (or even if) the portal will be
+ * used. */
+static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+	struct qm_portal_config *pcfg;
+	const u32 *channel;
+	int irq, ret;
+
+	pcfg = kzalloc(sizeof(*pcfg), GFP_KERNEL);
+	if (!pcfg)
+		return NULL;
+
+	/*
+	 * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
+	 * 'struct device' in order to get the PAMU stashing setup and the QMan
+	 * portal [driver] won't function at all without ring stashing
+	 *
+	 * Making the QMan portal driver nice and proper is part of the
+	 * upstreaming effort
+	 */
+	pcfg->dev.bus = &platform_bus_type;
+	pcfg->dev.of_node = node;
+#ifdef CONFIG_IOMMU_API
+	pcfg->dev.archdata.iommu_domain = NULL;
+#endif
+
+	ret = of_address_to_resource(node, DPA_PORTAL_CE,
+				&pcfg->addr_phys[DPA_PORTAL_CE]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
+		goto err;
+	}
+	ret = of_address_to_resource(node, DPA_PORTAL_CI,
+				&pcfg->addr_phys[DPA_PORTAL_CI]);
+	if (ret) {
+		pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
+		goto err;
+	}
+
+	channel = of_get_property(node, "fsl,qman-channel-id", &ret);
+	if (!channel || (ret != 4)) {
+		pr_err("Can't get %s property 'fsl,qman-channel-id'\n",
+		       node->full_name);
+		goto err;
+	}
+	pcfg->public_cfg.channel = *channel;
+	pcfg->public_cfg.cpu = -1;
+	irq = irq_of_parse_and_map(node, 0);
+	if (irq == NO_IRQ) {
+		pr_err("Can't get %s property 'interrupts'\n", node->full_name);
+		goto err;
+	}
+	pcfg->public_cfg.irq = irq;
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	/* We need the same LIODN offset for all portals */
+	qman_liodn_fixup(pcfg->public_cfg.channel);
+#endif
+
+	pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CE].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]),
+				0);
+	pcfg->addr_virt[DPA_PORTAL_CI] = ioremap_prot(
+				pcfg->addr_phys[DPA_PORTAL_CI].start,
+				resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]),
+				_PAGE_GUARDED | _PAGE_NO_CACHE);
+
+	return pcfg;
+err:
+	kfree(pcfg);
+	return NULL;
+}
+
+static struct qm_portal_config *get_pcfg(struct list_head *list)
+{
+	struct qm_portal_config *pcfg;
+
+	if (list_empty(list))
+		return NULL;
+	pcfg = list_entry(list->prev, struct qm_portal_config, list);
+	list_del(&pcfg->list);
+	return pcfg;
+}
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+	int ret;
+	int window_count = 1;
+	struct iommu_domain_geometry geom_attr;
+	struct pamu_stash_attribute stash_attr;
+
+	pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+	if (!pcfg->iommu_domain) {
+		pr_err("%s(): iommu_domain_alloc() failed", __func__);
+		goto _no_iommu;
+	}
+	geom_attr.aperture_start = 0;
+	geom_attr.aperture_end =
+		((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+	geom_attr.force_aperture = true;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+				    &geom_attr);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+				    &window_count);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d", __func__, ret);
+		goto _iommu_domain_free;
+	}
+	stash_attr.cpu = cpu;
+	stash_attr.cache = PAMU_ATTR_CACHE_L1;
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_STASH,
+				    &stash_attr);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+					 IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_window_enable() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+	if (ret < 0) {
+		pr_err("%s(): iommu_device_attach() = %d",
+			   __func__, ret);
+		goto _iommu_domain_free;
+	}
+	ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				    DOMAIN_ATTR_FSL_PAMU_ENABLE,
+				    &window_count);
+	if (ret < 0) {
+		pr_err("%s(): iommu_domain_set_attr() = %d",
+			   __func__, ret);
+		goto _iommu_detach_device;
+	}
+
+_no_iommu:
+#endif
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+		pr_warn("Failed to set the stash request queue\n");
+
+	return;
+
+#ifdef CONFIG_FSL_PAMU
+_iommu_detach_device:
+	iommu_detach_device(pcfg->iommu_domain, NULL);
+_iommu_domain_free:
+	iommu_domain_free(pcfg->iommu_domain);
+	pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+	struct qman_portal *p;
+
+	pcfg->iommu_domain = NULL;
+	portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+	p = qman_create_affine_portal(pcfg, NULL);
+	if (p) {
+		u32 irq_sources = 0;
+		/* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+		irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+			       QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+		irq_sources |= QM_PIRQ_DQRI;
+#endif
+		qman_p_irqsource_add(p, irq_sources);
+		pr_info("Portal %sinitialised, cpu %d\n",
+			pcfg->public_cfg.is_shared ? "(shared) " : "",
+			pcfg->public_cfg.cpu);
+	} else
+		pr_crit("Portal failure on cpu %d\n", pcfg->public_cfg.cpu);
+	return p;
+}
+
+static void init_slave(int cpu)
+{
+	struct qman_portal *p;
+	struct cpumask oldmask = *tsk_cpus_allowed(current);
+
+	set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+	p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+	if (!p)
+		pr_err("Slave portal failure on cpu %d\n", cpu);
+	else
+		pr_info("Portal (slave) initialised, cpu %d\n", cpu);
+	set_cpus_allowed_ptr(current, &oldmask);
+	if (shared_portals_idx >= num_shared_portals)
+		shared_portals_idx = 0;
+}
+
+static struct cpumask want_unshared __initdata;
+static struct cpumask want_shared __initdata;
+
+static int __init parse_qportals(char *str)
+{
+	return parse_portals_bootarg(str, &want_shared, &want_unshared,
+				     "qportals");
+}
+__setup("qportals=", parse_qportals);
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+							unsigned int cpu)
+{
+	struct pamu_stash_attribute stash_attr;
+	int ret;
+
+	if (pcfg->iommu_domain) {
+		stash_attr.cpu = cpu;
+		stash_attr.cache = PAMU_ATTR_CACHE_L1;
+		ret = iommu_domain_set_attr(pcfg->iommu_domain,
+				DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+		if (ret < 0) {
+			pr_err("Failed to update pamu stash setting\n");
+			return;
+		}
+	}
+#ifdef CONFIG_FSL_QMAN_CONFIG
+	if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+		pr_warn("Failed to update portal's stash request queue\n");
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+	struct qman_portal *p;
+	const struct qm_portal_config *pcfg;
+
+	p = (struct qman_portal *)affine_portals[cpu];
+	if (p) {
+		pcfg = qman_get_qm_portal_config(p);
+		if (pcfg) {
+			irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+			qman_portal_update_sdest(pcfg, 0);
+		}
+	}
+}
+
+__init int qman_init(void)
+{
+	struct cpumask slave_cpus;
+	struct cpumask unshared_cpus = *cpu_none_mask;
+	struct cpumask shared_cpus = *cpu_none_mask;
+	LIST_HEAD(unshared_pcfgs);
+	LIST_HEAD(shared_pcfgs);
+	struct device_node *dn;
+	struct qm_portal_config *pcfg;
+	struct qman_portal *p;
+	int cpu, ret;
+	struct cpumask offline_cpus;
+
+	/* Initialise the QMan (CCSR) device */
+	for_each_compatible_node(dn, NULL, "fsl,qman") {
+		if (!qman_init_ccsr(dn))
+			pr_info("Err interrupt handler present\n");
+		else
+			pr_err("CCSR setup failed\n");
+	}
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	/* Setup lookup table for FQ demux */
+	ret = qman_setup_fq_lookup_table(qman_fqd_size()/64);
+	if (ret)
+		return ret;
+#endif
+
+	/* Get qman ip revision */
+	qman_get_ip_revision(dn);
+	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+		qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+	}
+
+	/* Parse pool channels into the SDQCR mask. (Must happen before portals
+	 * are initialised.) */
+	for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+		ret = fsl_pool_channel_range_sdqcr(dn);
+		if (ret)
+			return ret;
+	}
+
+	memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
+	/* Initialise portals. See bman_driver.c for comments */
+	for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+		if (!of_device_is_available(dn))
+			continue;
+		pcfg = parse_pcfg(dn);
+		if (pcfg) {
+			pcfg->public_cfg.pools = pools_sdqcr;
+			list_add_tail(&pcfg->list, &unused_pcfgs);
+		}
+	}
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, &want_shared)) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+			cpumask_set_cpu(cpu, &shared_cpus);
+		}
+		if (cpumask_test_cpu(cpu, &want_unshared)) {
+			if (cpumask_test_cpu(cpu, &shared_cpus))
+				continue;
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+		for_each_possible_cpu(cpu) {
+			pcfg = get_pcfg(&unused_pcfgs);
+			if (!pcfg)
+				break;
+			pcfg->public_cfg.cpu = cpu;
+			list_add_tail(&pcfg->list, &unshared_pcfgs);
+			cpumask_set_cpu(cpu, &unshared_cpus);
+		}
+	}
+	cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+	cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+	if (cpumask_empty(&slave_cpus)) {
+		if (!list_empty(&shared_pcfgs)) {
+			cpumask_or(&unshared_cpus, &unshared_cpus,
+				   &shared_cpus);
+			cpumask_clear(&shared_cpus);
+			list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+			INIT_LIST_HEAD(&shared_pcfgs);
+		}
+	} else {
+		if (list_empty(&shared_pcfgs)) {
+			pcfg = get_pcfg(&unshared_pcfgs);
+			if (!pcfg) {
+				pr_crit("No portals available!\n");
+				return 0;
+			}
+			cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+			cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+			list_add_tail(&pcfg->list, &shared_pcfgs);
+		}
+	}
+	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 0;
+		p = init_pcfg(pcfg);
+	}
+	list_for_each_entry(pcfg, &shared_pcfgs, list) {
+		pcfg->public_cfg.is_shared = 1;
+		p = init_pcfg(pcfg);
+		if (p)
+			shared_portals[num_shared_portals++] = p;
+	}
+	if (!cpumask_empty(&slave_cpus))
+		for_each_cpu(cpu, &slave_cpus)
+			init_slave(cpu);
+	pr_info("Portals initialised\n");
+	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, &offline_cpus)
+		qman_offline_cpu(cpu);
+	return 0;
+}
+
+__init int qman_resource_init(void)
+{
+	struct device_node *dn;
+	int ret;
+
+	/* Initialise FQID allocation ranges */
+	for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
+		ret = fsl_fqid_range_init(dn);
+		if (ret)
+			return ret;
+	}
+	/* Initialise CGRID allocation ranges */
+	for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
+		ret = fsl_cgrid_range_init(dn);
+		if (ret)
+			return ret;
+	}
+	/* Parse pool channels into the allocator. (Must happen after portals
+	 * are initialised.) */
+	for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+		ret = fsl_pool_channel_range_init(dn);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000..03c923f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,279 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+#include <asm/fsl_pamu_stash.h>
+
+/* Congestion Groups */
+
+/* This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, eg. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+struct qman_cgrs {
+	struct __qm_mcr_querycongestion q;
+};
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+	memset(c, 0, sizeof(*c));
+}
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+	memset(c, 0xff, sizeof(*c));
+}
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+	return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+	c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+	c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+	while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
+		;
+	return num;
+}
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+				const struct qman_cgrs *src)
+{
+	*dest = *src;
+}
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.__state;
+	const u32 *_a = a->q.__state;
+	const u32 *_b = b->q.__state;
+
+	for (i = 0; i < 8; i++)
+		*(_d++) = *(_a++) & *(_b++);
+}
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+			const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+	int i;
+	u32 *_d = dest->q.__state;
+	const u32 *_a = a->q.__state;
+	const u32 *_b = b->q.__state;
+
+	for (i = 0; i < 8; i++)
+		*(_d++) = *(_a++) ^ *(_b++);
+}
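+
+/* Illustrative sketch (example only, not part of the driver): intersect a
+ * congestion-state snapshot with a mask of the CGRs of interest and walk the
+ * result. Here qman_cgrs_fill() stands in for a real QUERYCONGESTION result:
+ *
+ *	struct qman_cgrs state, mask, hits;
+ *	int i;
+ *
+ *	qman_cgrs_fill(&state);
+ *	qman_cgrs_init(&mask);
+ *	qman_cgrs_set(&mask, 42);
+ *	qman_cgrs_and(&hits, &state, &mask);
+ *	for (i = qman_cgrs_next(&hits, -1); i < __CGR_NUM;
+ *	     i = qman_cgrs_next(&hits, i))
+ *		pr_info("CGR %d is congested and of interest\n", i);
+ */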
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+	qm_isr_status = 0,
+	qm_isr_enable = 1,
+	qm_isr_disable = 2,
+	qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+	/* Corenet portal addresses;
+	 * [0]==cache-enabled, [1]==cache-inhibited. */
+	__iomem void *addr_virt[2];
+	struct resource addr_phys[2];
+	struct device dev;
+	struct iommu_domain *iommu_domain;
+	/* Allow these to be joined in lists */
+	struct list_head list;
+	/* User-visible portal configuration settings */
+	struct qman_portal_config public_cfg;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+extern u16 qman_portal_max;
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+/* Hooks from qman_driver.c to qman_config.c */
+int qman_init_ccsr(struct device_node *node);
+void qman_liodn_fixup(u16 channel);
+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
+size_t qman_fqd_size(void);
+#endif
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+/* Hooks from qman_driver.c in to qman_high.c */
+struct qman_portal *qman_create_portal(
+			struct qman_portal *portal,
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+
+struct qman_portal *qman_create_affine_portal(
+			const struct qm_portal_config *config,
+			const struct qman_cgrs *cgrs);
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+								int cpu);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+void qman_destroy_portal(struct qman_portal *qm);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is wider than the context_b field can hold,
+ * then a lookup table is required. */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+
+/*************************************************/
+/*   QMan s/w corenet portal, low-level i/face	 */
+/*************************************************/
+
+/* Note: most functions are only used by the high-level interface, so are
+ * inlined from qman.h. The stuff below is for use by other parts of the
+ * driver. */
+
+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ *   Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ *   You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *   priority.
+ * If SOURCE == SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS	0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
+#define QM_SDQCR_COUNT_EXACT1		0x0
+#define QM_SDQCR_COUNT_UPTO3		0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_SDQCR_TYPE_MASK		0x03000000
+#define QM_SDQCR_TYPE_NULL		0x0
+#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_SDQCR_TYPE_ACTIVE		0x03000000
+#define QM_SDQCR_TOKEN_MASK		0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
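+
+/* Illustrative sketch (example only): an SDQCR value that dequeues up to
+ * three frames at a time from the portal's dedicated channel, using the
+ * active-FQ-with-QoS dequeue type and a (hypothetical) token of 0xab:
+ *
+ *	u32 sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ *		    QM_SDQCR_TYPE_ACTIVE_QOS | QM_SDQCR_TOKEN_SET(0xab) |
+ *		    QM_SDQCR_CHANNELS_DEDICATED;
+ */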
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK		0x00ffffff
+#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)
+
+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
+ * If MODE==SCHEDULED
+ *   Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
+ *   If CHANNELS,
+ *     Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
+ *     You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ *     priority.
+ *   If SPECIFICWQ,
+ *     Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ *     channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ *     work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ *     same value.
+ * If MODE==UNSCHEDULED
+ *     Choose FQID().
+ */
+#define QM_PDQCR_MODE_SCHEDULED		0x0
+#define QM_PDQCR_MODE_UNSCHEDULED	0x80000000
+#define QM_PDQCR_SCHEDULED_CHANNELS	0x0
+#define QM_PDQCR_SCHEDULED_SPECIFICWQ	0x40000000
+#define QM_PDQCR_COUNT_EXACT1		0x0
+#define QM_PDQCR_COUNT_UPTO3		0x20000000
+#define QM_PDQCR_DEDICATED_PRECEDENCE	0x10000000
+#define QM_PDQCR_TYPE_MASK		0x03000000
+#define QM_PDQCR_TYPE_NULL		0x0
+#define QM_PDQCR_TYPE_PRIO_QOS		0x01000000
+#define QM_PDQCR_TYPE_ACTIVE_QOS	0x02000000
+#define QM_PDQCR_TYPE_ACTIVE		0x03000000
+#define QM_PDQCR_CHANNELS_DEDICATED	0x00008000
+#define QM_PDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+#define QM_PDQCR_SPECIFICWQ_MASK	0x000000f7
+#define QM_PDQCR_SPECIFICWQ_DEDICATED	0x00000000
+#define QM_PDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
+#define QM_PDQCR_SPECIFICWQ_WQ(n)	(n)
+#define QM_PDQCR_FQID(n)		((n) & 0xffffff)
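+
+/* Illustrative sketch (example only): a PDQCR value performing a single
+ * unscheduled dequeue from a hypothetical FQID 0x42:
+ *
+ *	u32 pdqcr = QM_PDQCR_MODE_UNSCHEDULED | QM_PDQCR_COUNT_EXACT1 |
+ *		    QM_PDQCR_FQID(0x42);
+ */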
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL	0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL	0x8000		/* Portal channel */
+#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK		0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_isr_<reg>_<verb>(). So for example, qm_isr_disable_write()
+ * means "write the disable register" rather than "disable the ability to
+ * write". */
+#define qm_isr_status_read(qm)		__qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m)	__qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm)		__qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v)	__qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm)		__qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v)	__qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm)		__qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm)		__qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+int qman_have_ccsr(void);
+#else
+#define qman_have_ccsr	0
+#endif
+
+__init int qman_init(void);
+__init int qman_resource_init(void);
+
+extern void *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+						struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_utils.c b/drivers/soc/fsl/qbman/qman_utils.c
new file mode 100644
index 0000000..6e5e73c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_utils.c
@@ -0,0 +1,305 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* --- FQID Pool --- */
+
+struct qman_fqid_pool {
+	/* Base and size of the FQID range */
+	u32 fqid_base;
+	u32 total;
+	/* Number of FQIDs currently "allocated" */
+	u32 used;
+	/* Allocation optimisation. When 'used<total', it is the index of an
+	 * available FQID. Otherwise there are no available FQIDs, and this
+	 * will be set when the next deallocation occurs. */
+	u32 next;
+	/* A bit-field representation of the FQID range. */
+	unsigned long *bits;
+};
+
+#define QLONG_BYTES	sizeof(unsigned long)
+#define QLONG_BITS	(QLONG_BYTES * 8)
+/* Number of 'longs' required for the given number of bits */
+#define QNUM_LONGS(b)	(((b) + QLONG_BITS - 1) / QLONG_BITS)
+/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
+#define QNUM_BYTES(b)	(QNUM_LONGS(b) * QLONG_BYTES)
+/* And in bits */
+#define QNUM_BITS(b)	(QNUM_LONGS(b) * QLONG_BITS)
+
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
+{
+	struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	unsigned int i;
+
+	BUG_ON(!num);
+	if (!pool)
+		return NULL;
+	pool->fqid_base = fqid_start;
+	pool->total = num;
+	pool->used = 0;
+	pool->next = 0;
+	pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
+	if (!pool->bits) {
+		kfree(pool);
+		return NULL;
+	}
+	/* If num is not an even multiple of QLONG_BITS (or even 8, for
+	 * byte-oriented searching) then we fill the trailing bits with 1, to
+	 * make them look allocated (permanently). */
+	for (i = num; i < QNUM_BITS(num); i++)
+		set_bit(i, pool->bits);
+	return pool;
+}
+EXPORT_SYMBOL(qman_fqid_pool_create);
+
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
+{
+	int ret = pool->used;
+
+	kfree(pool->bits);
+	kfree(pool);
+	return ret;
+}
+EXPORT_SYMBOL(qman_fqid_pool_destroy);
+
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
+{
+	int ret;
+
+	if (pool->used == pool->total)
+		return -ENOMEM;
+	*fqid = pool->fqid_base + pool->next;
+	ret = test_and_set_bit(pool->next, pool->bits);
+	BUG_ON(ret);
+	if (++pool->used == pool->total)
+		return 0;
+	pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
+	if (pool->next >= pool->total)
+		pool->next = find_first_zero_bit(pool->bits, pool->total);
+	BUG_ON(pool->next >= pool->total);
+	return 0;
+}
+EXPORT_SYMBOL(qman_fqid_pool_alloc);
+
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
+{
+	int ret;
+
+	fqid -= pool->fqid_base;
+	ret = test_and_clear_bit(fqid, pool->bits);
+	BUG_ON(!ret);
+	if (pool->used-- == pool->total)
+		pool->next = fqid;
+}
+EXPORT_SYMBOL(qman_fqid_pool_free);
+
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
+{
+	return pool->used;
+}
+EXPORT_SYMBOL(qman_fqid_pool_used);
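+
+/* Illustrative usage sketch (example only; the base FQID and pool size are
+ * hypothetical):
+ *
+ *	struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 64);
+ *	u32 fqid;
+ *
+ *	if (pool && !qman_fqid_pool_alloc(pool, &fqid)) {
+ *		... use 'fqid', then return it ...
+ *		qman_fqid_pool_free(pool, fqid);
+ *	}
+ *	if (pool)
+ *		qman_fqid_pool_destroy(pool);
+ */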
+
+static DECLARE_DPAA_RESOURCE(fqalloc); /* FQID allocator */
+static DECLARE_DPAA_RESOURCE(qpalloc); /* pool-channel allocator */
+static DECLARE_DPAA_RESOURCE(cgralloc); /* CGR ID allocator */
+
+/* FQID allocator front-end */
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&fqalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+static int fq_cleanup(u32 fqid)
+{
+	return qman_shutdown_fq(fqid) == 0;
+}
+
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&fqalloc,
+						  fqid, count, fq_cleanup);
+
+	if (total_invalid)
+		pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
+			fqid, fqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_fqid_range);
+
+int qman_reserve_fqid_range(u32 fqid, u32 count)
+{
+	return dpaa_resource_reserve(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_reserve_fqid_range);
+
+void qman_seed_fqid_range(u32 fqid, u32 count)
+{
+	dpaa_resource_seed(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_seed_fqid_range);
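+
+/* Illustrative sketch (example only, assuming the dpaa_resource convention
+ * that a successful allocation returns the number of IDs obtained): allocate
+ * a block of eight FQIDs aligned to eight, refusing a partial allocation,
+ * then release them:
+ *
+ *	u32 fqid;
+ *
+ *	if (qman_alloc_fqid_range(&fqid, 8, 8, 0) == 8) {
+ *		... FQIDs fqid..fqid+7 belong to the caller ...
+ *		qman_release_fqid_range(fqid, 8);
+ *	}
+ */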
+
+/* Pool-channel allocator front-end */
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&qpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+static int qpool_cleanup(u32 qp)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
+	 * channel is the pool-channel being released. When a non-OOS FQD is
+	 * found we attempt to clean it up. */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 1;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			BUG_ON(err);
+			if (fqd.dest.channel == qp) {
+				/* The channel is the FQ's target, clean it */
+				if (qman_shutdown_fq(fq.fqid) != 0)
+					/* Couldn't shut down the FQ,
+					 * so the pool must be leaked */
+					return 0;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+void qman_release_pool_range(u32 qp, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&qpalloc,
+						  qp, count, qpool_cleanup);
+
+	if (total_invalid) {
+		/* Pool channels are almost always used individually */
+		if (count == 1)
+			pr_err("Pool channel 0x%x had %d leaks\n",
+				qp, total_invalid);
+		else
+			pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
+				qp, qp + count - 1, count, total_invalid);
+	}
+}
+EXPORT_SYMBOL(qman_release_pool_range);
+
+void qman_seed_pool_range(u32 poolid, u32 count)
+{
+	dpaa_resource_seed(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_seed_pool_range);
+
+int qman_reserve_pool_range(u32 poolid, u32 count)
+{
+	return dpaa_resource_reserve(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_reserve_pool_range);
+
+
+/* CGR ID allocator front-end */
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+	return dpaa_resource_new(&cgralloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+static int cqr_cleanup(u32 cgrid)
+{
+	/* We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose CGR is the
+	 * CGR being released.
+	 */
+	struct qman_fq fq = {
+		.fqid = 1
+	};
+	int err;
+
+	do {
+		struct qm_mcr_queryfq_np np;
+
+		err = qman_query_fq_np(&fq, &np);
+		if (err)
+			/* FQID range exceeded, found no problems */
+			return 1;
+		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+			struct qm_fqd fqd;
+
+			err = qman_query_fq(&fq, &fqd);
+			BUG_ON(err);
+			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+			    (fqd.cgid == cgrid)) {
+				pr_err("CRGID 0x%x is being used by FQID 0x%x,"
+				       " CGR will be leaked\n",
+				       cgrid, fq.fqid);
+				return 1;
+			}
+		}
+		/* Move to the next FQID */
+		fq.fqid++;
+	} while (1);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+	u32 total_invalid = dpaa_resource_release(&cgralloc,
+						  cgrid, count, cqr_cleanup);
+	if (total_invalid)
+		pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
+			cgrid, cgrid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_cgrid_range);
+
+void qman_seed_cgrid_range(u32 cgrid, u32 count)
+{
+	dpaa_resource_seed(&cgralloc, cgrid, count);
+}
+EXPORT_SYMBOL(qman_seed_cgrid_range);
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 0000000..2e22987
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1977 @@
+/* Copyright 2008 - 2015 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/bitops.h>
+
+/* Extra lookup is needed on 64 bit machines */
+#if (BITS_PER_LONG == 64)
+#define CONFIG_FSL_QMAN_FQ_LOOKUP 1
+#endif
+
+/* Enable blocking waits */
+#define FSL_DPA_CAN_WAIT       1
+#define FSL_DPA_CAN_WAIT_SYNC  1
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
+#define QMAN_CHANNEL_PME 0xa0
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
+#define QMAN_CHANNEL_PME_REV3 0x860
+#define QMAN_CHANNEL_DCE 0x8a0
+extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
+extern u16 qm_channel_pme;
+extern u16 qm_channel_dce;
+enum qm_dc_portal {
+	qm_dc_portal_fman0 = 0,
+	qm_dc_portal_fman1 = 1,
+	qm_dc_portal_caam = 2,
+	qm_dc_portal_pme = 3,
+	qm_dc_portal_rman = 4,
+	qm_dc_portal_dce = 5
+};
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
+#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
+#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
+/* This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. sources that, if present, should trigger slow-path processing. */
+#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+			 QM_PIRQ_MRI)
+
+/* --- Clock speed --- */
+/* A qman driver instance may or may not know the current qman clock speed.
+ * However, certain CEETM calculations may not be possible if this is not known.
+ * The 'set' function will only succeed (return zero) if the driver did not
+ * already know the clock speed. Likewise, the 'get' function will only succeed
+ * if the driver does know the clock speed (either because it knew when booting,
+ * or was told via 'set'). In cases where software is running on a driver
+ * instance that does not know the clock speed (eg. on a hypervised data-plane),
+ * and the user can obtain the current qman clock speed by other means (eg. from
+ * a message sent from the control-plane), then the 'set' function can be used
+ * to enable rate-calculations in a driver where it would otherwise not be
+ * possible. */
+int qm_get_clock(u64 *clock_hz);
+int qm_set_clock(u64 clock_hz);
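+
+/* Illustrative sketch (example only; the 500 MHz figure is hypothetical,
+ * eg. received from the control-plane):
+ *
+ *	u64 qman_hz;
+ *
+ *	if (qm_get_clock(&qman_hz)) {
+ *		if (qm_set_clock(500000000))
+ *			pr_warn("could not set qman clock speed\n");
+ *	}
+ */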
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
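+
+/* For example, if a pool channel's qm_channel value is qm_channel_pool1 + 2,
+ * then QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + 2) evaluates to
+ * QM_SDQCR_CHANNELS_POOL(3). */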
+
+/* For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID. */
+#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
+#define QM_VDQCR_EXACT			0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)
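+
+/* Illustrative sketch (example only): a VDQCR value requesting exactly eight
+ * frames, with the volatile command taking precedence over any static dequeue
+ * command; the FQID bits are then filled in with the FQID(n) macro:
+ *
+ *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT |
+ *		    QM_VDQCR_NUMFRAMES_SET(8);
+ */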
+
+
+/* ------------------------------------------------------- */
+/* --- QMan data structures (and associated constants) --- */
+
+/* Represents s/w corenet portal mapped data structures */
+struct qm_eqcr_entry;	/* EQCR (EnQueue Command Ring) entries */
+struct qm_dqrr_entry;	/* DQRR (DeQueue Response Ring) entries */
+struct qm_mr_entry;	/* MR (Message Ring) entries */
+struct qm_mc_command;	/* MC (Management Command) command */
+struct qm_mc_result;	/* MC result */
+
+/* See David Lapp's "Frame formats" document, "dpateam", Jan 07, 2008 */
+#define QM_FD_FORMAT_SG		0x4
+#define QM_FD_FORMAT_LONG	0x2
+#define QM_FD_FORMAT_COMPOUND	0x1
+enum qm_fd_format {
+	/* 'contig' implies a contiguous buffer, whereas 'sg' implies a
+	 * scatter-gather table. 'big' implies a 29-bit length with no offset
+	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+	 * implies a s/g-like table, where each entry itself represents a frame
+	 * (contiguous or scatter-gather) and the 29-bit "length" is
+	 * interpreted purely for congestion calculations, ie. a "congestion
+	 * weight". */
+	qm_fd_contig = 0,
+	qm_fd_contig_big = QM_FD_FORMAT_LONG,
+	qm_fd_sg = QM_FD_FORMAT_SG,
+	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+	qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+/* Capitalised versions are un-typed but can be used in static expressions */
+#define QM_FD_CONTIG	0
+#define QM_FD_CONTIG_BIG QM_FD_FORMAT_LONG
+#define QM_FD_SG	QM_FD_FORMAT_SG
+#define QM_FD_SG_BIG	(QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG)
+#define QM_FD_COMPOUND	QM_FD_FORMAT_COMPOUND
+
+/* See 1.5.1.1: "Frame Descriptor (FD)" */
+struct qm_fd {
+	union {
+		struct {
+			u8 dd:2;	/* dynamic debug */
+			u8 liodn_offset:6;
+			u8 bpid:8;	/* Buffer Pool ID */
+			u8 eliodn_offset:4;
+			u8 __reserved:4;
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		struct {
+			u64 __notaddress:24;
+			/* More efficient address accessor */
+			u64 addr:40;
+		};
+		u64 opaque_addr;
+	};
+	/* The 'format' field indicates the interpretation of the remaining 29
+	 * bits of the 32-bit word. For packing reasons, it is duplicated in the
+	 * other union elements. Note, union'd structs are difficult to use with
+	 * static initialisation under gcc, in which case use the "opaque" form
+	 * with one of the macros. */
+	union {
+		/* For easier/faster copying of this part of the fd (eg. from a
+		 * DQRR entry to an EQCR entry) copy 'opaque' */
+		u32 opaque;
+		/* If 'format' is _contig or _sg, 20b length and 9b offset */
+		struct {
+			enum qm_fd_format format:3;
+			u16 offset:9;
+			u32 length20:20;
+		};
+		/* If 'format' is _contig_big or _sg_big, 29b length */
+		struct {
+			enum qm_fd_format _format1:3;
+			u32 length29:29;
+		};
+		/* If 'format' is _compound, 29b "congestion weight" */
+		struct {
+			enum qm_fd_format _format2:3;
+			u32 cong_weight:29;
+		};
+	};
+	union {
+		u32 cmd;
+		u32 status;
+	};
+} __aligned(8);
+#define QM_FD_DD_NULL		0x00
+#define QM_FD_PID_MASK		0x3f
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+	return fd->addr;
+}
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+	return (dma_addr_t)fd->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_fd_addr_set64(fd, v) \
+	do { \
+		struct qm_fd *__fd931 = (fd); \
+		__fd931->addr = v; \
+	} while (0)
+
+/* For static initialisation of FDs (which is complicated by the use of unions
+ * in "struct qm_fd"), use the following macros. Note that;
+ * - 'dd', 'pid' and 'bpid' are ignored because there's no static initialisation
+ *   use-case),
+ * - use capitalised QM_FD_*** formats for static initialisation.
+ */
+#define QM_FD_FMT_20(cmd, addr_hi, addr_lo, fmt, off, len) \
+	{ 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+	{ (((fmt)&0x7) << 29) | (((off)&0x1ff) << 20) | ((len)&0xfffff) }, \
+	{ cmd } }
+#define QM_FD_FMT_29(cmd, addr_hi, addr_lo, fmt, len) \
+	{ 0, 0, 0, 0, 0, addr_hi, addr_lo, \
+	{ (((fmt)&0x7) << 29) | ((len)&0x1fffffff) }, \
+	{ cmd } }
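+
+/* Illustrative sketch (example only): statically initialise a contiguous FD
+ * for a hypothetical 40-bit address 0x0123456780, offset 0, length 64 bytes,
+ * with a zero command/status word:
+ *
+ *	struct qm_fd fd = QM_FD_FMT_20(0, 0x01, 0x23456780, QM_FD_CONTIG, 0, 64);
+ */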
+
+/* See 2.2.1.3 Multi-Core Datapath Acceleration Architecture */
+struct qm_sg_entry {
+	union {
+		struct {
+			u8 __reserved1[3];
+			u8 addr_hi;	/* high 8-bits of 40-bit address */
+			u32 addr_lo;	/* low 32-bits of 40-bit address */
+		};
+		struct {
+			u64 __notaddress:24;
+			u64 addr:40;
+		};
+	};
+	u32 extension:1;	/* Extension bit */
+	u32 final:1;		/* Final bit */
+	u32 length:30;
+	u8 __reserved2;
+	u8 bpid;
+	u16 __reserved3:3;
+	u16 offset:13;
+} __packed;
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+	return sg->addr;
+}
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+	return (dma_addr_t)sg->addr;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_sg_entry_set64(sg, v) \
+	do { \
+		struct qm_sg_entry *__sg931 = (sg); \
+		__sg931->addr = v; \
+	} while (0)
+
+/* See 1.5.8.1: "Enqueue Command" */
+struct qm_eqcr_entry {
+	u8 __dont_write_directly__verb;
+	u8 dca;
+	u16 seqnum;
+	u32 orp;	/* 24-bit */
+	u32 fqid;	/* 24-bit */
+	u32 tag;
+	struct qm_fd fd;
+	u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT		0x80
+#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+#define QM_EQCR_VERB_COLOUR_MASK	0x18	/* 4 possible values; */
+#define QM_EQCR_VERB_COLOUR_GREEN	0x00
+#define QM_EQCR_VERB_COLOUR_YELLOW	0x08
+#define QM_EQCR_VERB_COLOUR_RED		0x10
+#define QM_EQCR_VERB_COLOUR_OVERRIDE	0x18
+#define QM_EQCR_VERB_INTERRUPT		0x04	/* on command consumption */
+#define QM_EQCR_VERB_ORP		0x02	/* enable order restoration */
+#define QM_EQCR_DCA_ENABLE		0x80
+#define QM_EQCR_DCA_PARK		0x40
+#define QM_EQCR_DCA_IDXMASK		0x0f	/* "DQRR::idx" goes here */
+#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
+#define QM_EQCR_FQID_NULL		0	/* eg. for an ORP seqnum hole */
+
+/* See 1.5.8.2: "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+	u8 verb;
+	u8 stat;
+	u16 seqnum;	/* 15-bit */
+	u8 tok;
+	u8 __reserved2[3];
+	u32 fqid;	/* 24-bit */
+	u32 contextB;
+	struct qm_fd fd;
+	u8 __reserved4[32];
+};
+#define QM_DQRR_VERB_VBIT		0x80
+#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
+
+/* See 1.5.8.3: "ERN Message Response" */
+/* See 1.5.8.4: "FQ State Change Notification" */
+struct qm_mr_entry {
+	u8 verb;
+	union {
+		struct {
+			u8 dca;
+			u16 seqnum;
+			u8 rc;		/* Rejection Code */
+			u32 orp:24;
+			u32 fqid;	/* 24-bit */
+			u32 tag;
+			struct qm_fd fd;
+		} __packed ern;
+		struct {
+			u8 colour:2;	/* See QM_MR_DCERN_COLOUR_* */
+			u8 __reserved1:3;
+			enum qm_dc_portal portal:3;
+			u16 __reserved2;
+			u8 rc;		/* Rejection Code */
+			u32 __reserved3:24;
+			u32 fqid;	/* 24-bit */
+			u32 tag;
+			struct qm_fd fd;
+		} __packed dcern;
+		struct {
+			u8 fqs;		/* Frame Queue Status */
+			u8 __reserved1[6];
+			u32 fqid;	/* 24-bit */
+			u32 contextB;
+			u8 __reserved2[16];
+		} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
+	};
+	u8 __reserved2[32];
+} __packed;
+#define QM_MR_VERB_VBIT			0x80
+/* The "ern" VERB bits match QM_EQCR_VERB_*** so aren't reproduced here. ERNs
+ * originating from direct-connect portals ("dcern") use 0x20 as a verb which
+ * would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished from
+ * the other MR types by noting if the 0x20 bit is unset. */
+#define QM_MR_VERB_TYPE_MASK		0x27
+#define QM_MR_VERB_DC_ERN		0x20
+#define QM_MR_VERB_FQRN			0x21
+#define QM_MR_VERB_FQRNI		0x22
+#define QM_MR_VERB_FQRL			0x23
+#define QM_MR_VERB_FQPN			0x24
+#define QM_MR_RC_MASK			0xf0	/* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP		0x00
+#define QM_MR_RC_WRED			0x10
+#define QM_MR_RC_ERROR			0x20
+#define QM_MR_RC_ORPWINDOW_EARLY	0x30
+#define QM_MR_RC_ORPWINDOW_LATE		0x40
+#define QM_MR_RC_FQ_TAILDROP		0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
+#define QM_MR_RC_ORP_ZERO		0x70
+#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+#define QM_MR_DCERN_COLOUR_GREEN	0x00
+#define QM_MR_DCERN_COLOUR_YELLOW	0x01
+#define QM_MR_DCERN_COLOUR_RED		0x02
+#define QM_MR_DCERN_COLOUR_OVERRIDE	0x03
+
+/* An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation. */
+struct qm_fqd_stashing {
+	/* See QM_STASHING_EXCL_<...> */
+	u8 exclusive;
+	u8 __reserved1:2;
+	/* Numbers of cachelines */
+	u8 annotation_cl:2;
+	u8 data_cl:2;
+	u8 context_cl:2;
+} __packed;
+struct qm_fqd_taildrop {
+	u16 __reserved1:3;
+	u16 mant:8;
+	u16 exp:5;
+} __packed;
+struct qm_fqd_oac {
+	/* See QM_OAC_<...> */
+	u8 oac:2; /* "Overhead Accounting Control" */
+	u8 __reserved1:6;
+	/* Two's-complement value (-128 to +127) */
+	signed char oal; /* "Overhead Accounting Length" */
+} __packed;
+struct qm_fqd {
+	union {
+		u8 orpc;
+		struct {
+			u8 __reserved1:2;
+			u8 orprws:3;
+			u8 oa:1;
+			u8 olws:2;
+		} __packed;
+	};
+	u8 cgid;
+	u16 fq_ctrl;	/* See QM_FQCTRL_<...> */
+	union {
+		u16 dest_wq;
+		struct {
+			u16 channel:13; /* qm_channel */
+			u16 wq:3;
+		} __packed dest;
+	};
+	u16 __reserved2:1;
+	u16 ics_cred:15;
+	/* For "Initialize Frame Queue" commands, the write-enable mask
+	 * determines whether 'td' or 'oac_init' is observed. For query
+	 * commands, this field is always 'td', and 'oac_query' (below) reflects
+	 * the Overhead ACcounting values. */
+	union {
+		struct qm_fqd_taildrop td;
+		struct qm_fqd_oac oac_init;
+	};
+	u32 context_b;
+	union {
+		/* Treat it as 64-bit opaque */
+		u64 opaque;
+		struct {
+			u32 hi;
+			u32 lo;
+		};
+		/* Treat it as s/w portal stashing config */
+		/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+		struct {
+			struct qm_fqd_stashing stashing;
+			/* 48-bit address of FQ context to
+			 * stash, must be cacheline-aligned */
+			u16 context_hi;
+			u32 context_lo;
+		} __packed;
+	} context_a;
+	struct qm_fqd_oac oac_query;
+} __packed;
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+	return ((u64)fqd->context_a.context_hi << 32) |
+		(u64)fqd->context_a.context_lo;
+}
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+	return (dma_addr_t)qm_fqd_stashing_get64(fqd);
+}
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+	return ((u64)fqd->context_a.hi << 32) |
+		(u64)fqd->context_a.lo;
+}
+/* Macro, so we compile better when 'v' isn't necessarily 64-bit */
+#define qm_fqd_stashing_set64(fqd, v) \
+	do { \
+		struct qm_fqd *__fqd931 = (fqd); \
+		__fqd931->context_a.context_hi = upper_32_bits(v); \
+		__fqd931->context_a.context_lo = lower_32_bits(v); \
+	} while (0)
+#define qm_fqd_context_a_set64(fqd, v) \
+	do { \
+		struct qm_fqd *__fqd931 = (fqd); \
+		__fqd931->context_a.hi = upper_32_bits(v); \
+		__fqd931->context_a.lo = lower_32_bits(v); \
+	} while (0)
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_taildrop_set(struct qm_fqd_taildrop *td, u32 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	if (val > 0xe0000000)
+		return -ERANGE;
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	td->exp = e;
+	td->mant = val;
+	return 0;
+}
+/* and the other direction */
+static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td)
+{
+	return (u32)td->mant << td->exp;
+}
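+
+/* Worked example: a threshold of 12288 is exactly representable;
+ * qm_fqd_taildrop_set(&td, 12288, 1) stores mant = 192, exp = 6, and
+ * qm_fqd_taildrop_get(&td) returns 192 << 6 == 12288. Values that are not
+ * exactly representable are rounded down, or up if 'roundup' is set. */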
+
+/* See 1.5.2.2: "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
+#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
+#define QM_FQCTRL_ORP		0x0100	/* ORP Enable */
+#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See 1.5.6.7.1: "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION	0x04
+#define QM_STASHING_EXCL_DATA		0x02
+#define QM_STASHING_EXCL_CTX		0x01
+
+/* See 1.5.5.3: "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS		0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG		0x1 /* Accounting for Congestion Groups */
+
+/* See 1.5.8.4: "FQ State Change Notification" */
+/* This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ *   MaxTH = MA * (2 ^ Mn)
+ *   Slope = SA / (2 ^ Sn)
+ *    MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+	union {
+		u32 word;
+		struct {
+			u32 MA:8;
+			u32 Mn:5;
+			u32 SA:7; /* must be between 64-127 */
+			u32 Sn:6;
+			u32 Pn:6;
+		} __packed;
+	};
+} __packed;
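+
+/* Worked example: with MA = 64, Mn = 6, SA = 64, Sn = 4 and Pn = 3, the
+ * formulas above give MaxTH = 64 * 2^6 = 4096, Slope = 64 / 2^4 = 4 and
+ * MaxP = 4 * (3 + 1) = 16. */
+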
+/* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ *   CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+	u16 __reserved:3;
+	u16 TA:8;
+	u16 Tn:5;
+} __packed;
+/* This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It is factored out here into its own
+ * struct. */
+struct __qm_mc_cgr {
+	struct qm_cgr_wr_parm wr_parm_g;
+	struct qm_cgr_wr_parm wr_parm_y;
+	struct qm_cgr_wr_parm wr_parm_r;
+	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
+	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
+	u8 cscn_en;	/* boolean, use QM_CGR_EN */
+	union {
+		struct {
+			u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+		};
+		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+	};
+	u8 cstd_en;	/* boolean, use QM_CGR_EN */
+	u8 cs;		/* boolean, only used in query response */
+	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN		0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0	0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1	0x00100000 /*			   : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+	return (u64)th->TA << th->Tn;
+}
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+					int roundup)
+{
+	u32 e = 0;
+	int oddbit = 0;
+
+	while (val > 0xff) {
+		oddbit = val & 1;
+		val >>= 1;
+		e++;
+		if (roundup && oddbit)
+			val++;
+	}
+	th->Tn = e;
+	th->TA = val;
+	return 0;
+}
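+
+/* Worked example: qm_cgr_cs_thres_set64(&th, 0x30000, 0) stores TA = 0xc0 and
+ * Tn = 10, and qm_cgr_cs_thres_get64(&th) recovers 0xc0 << 10 == 0x30000
+ * exactly; thresholds that are not exactly representable are rounded down,
+ * or up if 'roundup' is set. */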
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcc_initfq {
+	u8 __reserved1;
+	u16 we_mask;	/* Write Enable Mask */
+	u32 fqid;	/* 24-bit */
+	u16 count;	/* Initialises 'count+1' FQDs */
+	struct qm_fqd fqd; /* the FQD fields go here */
+	u8 __reserved3[30];
+} __packed;
+struct qm_mcc_queryfq {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+struct qm_mcc_queryfq_np {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2[56];
+} __packed;
+struct qm_mcc_alterfq {
+	u8 __reserved1[3];
+	u32 fqid;	/* 24-bit */
+	u8 __reserved2;
+	u8 count;	/* number of consecutive FQID */
+	u8 __reserved3[10];
+	u32 context_b;	/* frame queue context b */
+	u8 __reserved4[40];
+} __packed;
+struct qm_mcc_initcgr {
+	u8 __reserved1;
+	u16 we_mask;	/* Write Enable Mask */
+	struct __qm_mc_cgr cgr;	/* CGR fields */
+	u8 __reserved2[2];
+	u8 cgid;
+	u8 __reserved4[32];
+} __packed;
+struct qm_mcc_cgrtestwrite {
+	u8 __reserved1[2];
+	u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u8 __reserved2[23];
+	u8 cgid;
+	u8 __reserved3[32];
+} __packed;
+struct qm_mcc_querycgr {
+	u8 __reserved1[30];
+	u8 cgid;
+	u8 __reserved2[32];
+} __packed;
+struct qm_mcc_querycongestion {
+	u8 __reserved[63];
+} __packed;
+struct qm_mcc_querywq {
+	u8 __reserved;
+	/* select channel if verb != QUERYWQ_DEDICATED */
+	union {
+		u16 channel_wq; /* ignores wq (3 lsbits) */
+		struct {
+			u16 id:13; /* qm_channel */
+			u16 __reserved1:3;
+		} __packed channel;
+	};
+	u8 __reserved2[60];
+} __packed;
+
+struct qm_mc_command {
+	u8 __dont_write_directly__verb;
+	union {
+		struct qm_mcc_initfq initfq;
+		struct qm_mcc_queryfq queryfq;
+		struct qm_mcc_queryfq_np queryfq_np;
+		struct qm_mcc_alterfq alterfq;
+		struct qm_mcc_initcgr initcgr;
+		struct qm_mcc_cgrtestwrite cgrtestwrite;
+		struct qm_mcc_querycgr querycgr;
+		struct qm_mcc_querycongestion querycongestion;
+		struct qm_mcc_querywq querywq;
+	};
+} __packed;
+#define QM_MCC_VERB_VBIT		0x80
+#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED	0x40
+#define QM_MCC_VERB_INITFQ_SCHED	0x41
+#define QM_MCC_VERB_QUERYFQ		0x44
+#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ		0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
+#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
+#define QM_MCC_VERB_INITCGR		0x50
+#define QM_MCC_VERB_MODIFYCGR		0x51
+#define QM_MCC_VERB_CGRTESTWRITE	0x52
+#define QM_MCC_VERB_QUERYCGR		0x58
+#define QM_MCC_VERB_QUERYCONGESTION	0x59
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK		0x01ff	/* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC		0x0100
+#define QM_INITFQ_WE_ORPC		0x0080
+#define QM_INITFQ_WE_CGID		0x0040
+#define QM_INITFQ_WE_FQCTRL		0x0020
+#define QM_INITFQ_WE_DESTWQ		0x0010
+#define QM_INITFQ_WE_ICSCRED		0x0008
+#define QM_INITFQ_WE_TDTHRESH		0x0004
+#define QM_INITFQ_WE_CONTEXTB		0x0002
+#define QM_INITFQ_WE_CONTEXTA		0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK			0x07ff	/* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G		0x0400
+#define QM_CGR_WE_WR_PARM_Y		0x0200
+#define QM_CGR_WE_WR_PARM_R		0x0100
+#define QM_CGR_WE_WR_EN_G		0x0080
+#define QM_CGR_WE_WR_EN_Y		0x0040
+#define QM_CGR_WE_WR_EN_R		0x0020
+#define QM_CGR_WE_CSCN_EN		0x0010
+#define QM_CGR_WE_CSCN_TARG		0x0008
+#define QM_CGR_WE_CSTD_EN		0x0004
+#define QM_CGR_WE_CS_THRES		0x0002
+#define QM_CGR_WE_MODE			0x0001
+
+/* See 1.5.8.5.1: "Initialize FQ" */
+/* See 1.5.8.5.2: "Query FQ" */
+/* See 1.5.8.5.3: "Query FQ Non-Programmable Fields" */
+/* See 1.5.8.5.4: "Alter FQ State Commands " */
+/* See 1.5.8.6.1: "Initialize/Modify CGR" */
+/* See 1.5.8.6.2: "CGR Test Write" */
+/* See 1.5.8.6.3: "Query CGR" */
+/* See 1.5.8.6.4: "Query Congestion Group State" */
+struct qm_mcr_initfq {
+	u8 __reserved1[62];
+} __packed;
+struct qm_mcr_queryfq {
+	u8 __reserved1[8];
+	struct qm_fqd fqd;	/* the FQD fields are here */
+	u8 __reserved2[30];
+} __packed;
+struct qm_mcr_queryfq_np {
+	u8 __reserved1;
+	u8 state;	/* QM_MCR_NP_STATE_*** */
+	u8 __reserved2;
+	u32 fqd_link:24;
+	u16 __reserved3:2;
+	u16 odp_seq:14;
+	u16 __reserved4:2;
+	u16 orp_nesn:14;
+	u16 __reserved5:1;
+	u16 orp_ea_hseq:15;
+	u16 __reserved6:1;
+	u16 orp_ea_tseq:15;
+	u8 __reserved7;
+	u32 orp_ea_hptr:24;
+	u8 __reserved8;
+	u32 orp_ea_tptr:24;
+	u8 __reserved9;
+	u32 pfdr_hptr:24;
+	u8 __reserved10;
+	u32 pfdr_tptr:24;
+	u8 __reserved11[5];
+	u8 __reserved12:7;
+	u8 is:1;
+	u16 ics_surp;
+	u32 byte_cnt;
+	u8 __reserved13;
+	u32 frm_cnt:24;
+	u32 __reserved14;
+	u16 ra1_sfdr;	/* QM_MCR_NP_RA1_*** */
+	u16 ra2_sfdr;	/* QM_MCR_NP_RA2_*** */
+	u16 __reserved15;
+	u16 od1_sfdr;	/* QM_MCR_NP_OD1_*** */
+	u16 od2_sfdr;	/* QM_MCR_NP_OD2_*** */
+	u16 od3_sfdr;	/* QM_MCR_NP_OD3_*** */
+} __packed;
+struct qm_mcr_alterfq {
+	u8 fqs;		/* Frame Queue Status */
+	u8 __reserved1[61];
+} __packed;
+struct qm_mcr_initcgr {
+	u8 __reserved1[62];
+} __packed;
+struct qm_mcr_cgrtestwrite {
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[3];
+	u32 __reserved3:24;
+	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 __reserved4:24;
+	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	u16 lgt;	/* Last Group Tick */
+	u16 wr_prob_g;
+	u16 wr_prob_y;
+	u16 wr_prob_r;
+	u8 __reserved5[8];
+} __packed;
+struct qm_mcr_querycgr {
+	u16 __reserved1;
+	struct __qm_mc_cgr cgr; /* CGR fields */
+	u8 __reserved2[3];
+	u32 __reserved3:24;
+	u32 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */
+	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	u32 __reserved4:24;
+	u32 a_bcnt_hi:8;/* high 8-bits of 40-bit "Average" */
+	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	union {
+		u32 cscn_targ_swp[4];
+		u8 __reserved5[16];
+	};
+} __packed;
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+static inline u64 qm_mcr_cgrtestwrite_i_get64(
+					const struct qm_mcr_cgrtestwrite *q)
+{
+	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_cgrtestwrite_a_get64(
+					const struct qm_mcr_cgrtestwrite *q)
+{
+	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+/* Macro, so we compile better if 'v' isn't always 64-bit */
+#define qm_mcr_querycgr_i_set64(q, v) \
+	do { \
+		struct qm_mcr_querycgr *__q931 = (q); \
+		__q931->i_bcnt_hi = upper_32_bits(v); \
+		__q931->i_bcnt_lo = lower_32_bits(v); \
+	} while (0)
+#define qm_mcr_querycgr_a_set64(q, v) \
+	do { \
+		struct qm_mcr_querycgr *__q931 = (q); \
+		__q931->a_bcnt_hi = upper_32_bits(v); \
+		__q931->a_bcnt_lo = lower_32_bits(v); \
+	} while (0)
+struct __qm_mcr_querycongestion {
+	u32 __state[8];
+};
+struct qm_mcr_querycongestion {
+	u8 __reserved[30];
+	/* Access this struct using QM_MCR_QUERYCONGESTION() */
+	struct __qm_mcr_querycongestion state;
+} __packed;
+struct qm_mcr_querywq {
+	union {
+		u16 channel_wq; /* ignores wq (3 lsbits) */
+		struct {
+			u16 id:13; /* qm_channel */
+			u16 __reserved:3;
+		} __packed channel;
+	};
+	u8 __reserved[28];
+	u32 wq_len[8];
+} __packed;
+
+struct qm_mc_result {
+	u8 verb;
+	u8 result;
+	union {
+		struct qm_mcr_initfq initfq;
+		struct qm_mcr_queryfq queryfq;
+		struct qm_mcr_queryfq_np queryfq_np;
+		struct qm_mcr_alterfq alterfq;
+		struct qm_mcr_initcgr initcgr;
+		struct qm_mcr_cgrtestwrite cgrtestwrite;
+		struct qm_mcr_querycgr querycgr;
+		struct qm_mcr_querycongestion querycongestion;
+		struct qm_mcr_querywq querywq;
+	};
+} __packed;
+
+#define QM_MCR_VERB_RRID		0x80
+#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL		0x00
+#define QM_MCR_RESULT_OK		0xf0
+#define QM_MCR_RESULT_ERR_FQID		0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
+#define QM_MCR_RESULT_PENDING		0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
+#define QM_MCR_NP_STATE_FE		0x10
+#define QM_MCR_NP_STATE_R		0x08
+#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS		0x00
+#define QM_MCR_NP_STATE_RETIRED		0x01
+#define QM_MCR_NP_STATE_TEN_SCHED	0x02
+#define QM_MCR_NP_STATE_TRU_SCHED	0x03
+#define QM_MCR_NP_STATE_PARKED		0x04
+#define QM_MCR_NP_STATE_ACTIVE		0x05
+#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */
+#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
+/* This extracts the state for congestion group 'n' from a query response.
+ * Eg.
+ *   u8 cgr = [...];
+ *   struct qm_mc_result *res = [...];
+ *   printf("congestion group %d congestion state: %d\n", cgr,
+ *	 QM_MCR_QUERYCONGESTION(&res->querycongestion.state, cgr));
+ */
+#define __CGR_WORD(num)		((num) >> 5)
+#define __CGR_SHIFT(num)	((num) & 0x1f)
+#define __CGR_NUM		(sizeof(struct __qm_mcr_querycongestion) << 3)
+static inline int QM_MCR_QUERYCONGESTION(struct __qm_mcr_querycongestion *p,
+					u8 cgr)
+{
+	return p->__state[__CGR_WORD(cgr)] & (0x80000000 >> __CGR_SHIFT(cgr));
+}
+
+
+/*********************/
+/* Utility interface */
+/*********************/
+
+/* Represents an allocator over a range of FQIDs. NB, accesses are not locked,
+ * spinlock them yourself if needed. */
+struct qman_fqid_pool;
+
+/* Create/destroy a FQID pool, num must be a multiple of 32. NB, _destroy()
+ * always succeeds, but returns non-zero if there were "leaked" FQID
+ * allocations. */
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num);
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool);
+/* Alloc/free a FQID from the range. _alloc() returns zero for success. */
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid);
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid);
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool);
+
+/*******************************************************************/
+/* Managed (aka "shared" or "mux/demux") portal, high-level i/face */
+/*******************************************************************/
+
+	/* Portal and Frame Queues */
+	/* ----------------------- */
+/* Represents a managed portal */
+struct qman_portal;
+
+/* This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down. */
+struct qman_fq;
+
+/* This object type represents a QMan congestion group, it is defined further
+ * down. */
+struct qman_cgr;
+
+struct qman_portal_config {
+	/* If the caller enables DQRR stashing (and thus wishes to operate the
+	 * portal from only one cpu), this is the logical CPU that the portal
+	 * will stash to. Whether stashing is enabled or not, this setting is
+	 * also used for any "core-affine" portals, ie. default portals
+	 * associated to the corresponding cpu. -1 implies that there is no core
+	 * affinity configured. */
+	int cpu;
+	/* portal interrupt line */
+	int irq;
+	/* Is this portal shared? (If so, it has coarser locking and demuxes
+	 * processing on behalf of other CPUs.) */
+	int is_shared;
+	/* The portal's dedicated channel id, use this value for initialising
+	 * frame queues to target this portal when scheduled. */
+	u16 channel;
+	/* A mask of which pool channels this portal has dequeue access to
+	 * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask) */
+	u32 pools;
+};
+
+/* This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume. */
+enum qman_cb_dqrr_result {
+	/* DQRR entry can be consumed */
+	qman_cb_dqrr_consume,
+	/* Like _consume, but requests parking - FQ must be held-active */
+	qman_cb_dqrr_park,
+	/* Does not consume, for DCA mode only. This allows out-of-order
+	 * consumes by explicit calls to qman_dca() and/or the use of implicit
+	 * DCA via EQCR entries. */
+	qman_cb_dqrr_defer,
+	/* Stop processing without consuming this ring entry. Exits the current
+	 * qman_poll_dqrr() or interrupt-handling, as appropriate. If within an
+	 * interrupt handler, the callback would typically call
+	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+	 * otherwise the interrupt will reassert immediately. */
+	qman_cb_dqrr_stop,
+	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
+	qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+					struct qman_fq *fq,
+					const struct qm_dqrr_entry *dqrr);
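+
+/* A minimal sketch of a DQRR callback, for illustration only. The names
+ * 'my_dqrr_cb' and 'process_frame()' are placeholders, not part of the API;
+ *
+ *     static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
+ *                                     struct qman_fq *fq,
+ *                                     const struct qm_dqrr_entry *dqrr)
+ *     {
+ *         // handle the dequeued frame, then let the driver consume the entry
+ *         process_frame(fq, dqrr);
+ *         return qman_cb_dqrr_consume;
+ *     }
+ */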
+
+/* This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns. */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+				const struct qm_mr_entry *msg);
+
+/* This callback type is used when handling DCP ERNs */
+typedef void (*qman_cb_dc_ern)(struct qman_portal *qm,
+				const struct qm_mr_entry *msg);
+
+/* s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked". */
+enum qman_fq_state {
+	qman_fq_state_oos,
+	qman_fq_state_parked,
+	qman_fq_state_sched,
+	qman_fq_state_retired
+};
+
+/* Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ *     // myfq is allocated and driver_fq callbacks filled in;
+ *     struct my_fq {
+ *	   struct qman_fq base;
+ *	   int an_extra_field;
+ *	   [ ... add other fields to be associated with each FQ ...]
+ *     } *myfq = some_my_fq_allocator();
+ *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ *     // in a dequeue callback, access extra fields from 'fq' via a cast;
+ *     struct my_fq *myfq = (struct my_fq *)fq;
+ *     do_something_with(myfq->an_extra_field);
+ *     [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify however
+ *     many cachelines are required to stash 'struct my_fq', to accelerate not
+ *     only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+	qman_cb_dqrr dqrr;	/* for dequeued frames */
+	qman_cb_mr ern;		/* for s/w ERNs */
+	qman_cb_mr fqs;		/* frame-queue state changes*/
+};
+
+struct qman_fq {
+	/* Caller of qman_create_fq() provides these demux callbacks */
+	struct qman_fq_cb cb;
+	/* These are internal to the driver, don't touch. In particular, they
+	 * may change, be removed, or extended (so you shouldn't rely on
+	 * sizeof(qman_fq) being a constant). */
+	spinlock_t fqlock;
+	u32 fqid;
+	volatile unsigned long flags;
+	enum qman_fq_state state;
+	int cgr_groupid;
+	struct rb_node node;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+	u32 key;
+#endif
+};
+
+/* This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit. */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+			struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+	/* Set these prior to qman_create_cgr() */
+	u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+	qman_cb_cgr cb;
+	/* These are private to the driver */
+	u16 chan; /* portal channel this object is created on */
+	struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE	     0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY	     0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_LOCKED	     0x00000008 /* multi-core locking */
+#define QMAN_FQ_FLAG_AS_IS	     0x00000010 /* query h/w state */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_destroy_fq() */
+#define QMAN_FQ_DESTROY_PARKED	     0x00000001 /* FQ can be parked or OOS */
+
+/* Flags from qman_fq_state() */
+#define QMAN_FQ_STATE_CHANGING	     0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE	     0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL	     0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS	     0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN	     0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR	     0x08000000 /* being volatile dequeued */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED	     0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL	     0x00000004 /* set dest portal */
+
+/* Flags to qman_volatile_dequeue() */
+#ifdef FSL_DPA_CAN_WAIT
+#define QMAN_VOLATILE_FLAG_WAIT	     0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */
+#endif
+
+/* Flags to qman_enqueue(). NB, the strange numbering is to align with hardware,
+ * bit-wise. (NB: the PME API is sensitive to these precise numberings too, so
+ * any change here should be audited in PME.) */
+#ifdef FSL_DPA_CAN_WAIT
+#define QMAN_ENQUEUE_FLAG_WAIT	     0x00010000 /* wait if EQCR is full */
+#define QMAN_ENQUEUE_FLAG_WAIT_INT   0x00020000 /* if wait, interruptible? */
+#ifdef FSL_DPA_CAN_WAIT_SYNC
+#define QMAN_ENQUEUE_FLAG_WAIT_SYNC  0x00000004 /* if wait, until consumed? */
+#endif
+#endif
+#define QMAN_ENQUEUE_FLAG_WATCH_CGR  0x00080000 /* watch congestion state */
+#define QMAN_ENQUEUE_FLAG_DCA	     0x00008000 /* perform enqueue-DCA */
+#define QMAN_ENQUEUE_FLAG_DCA_PARK   0x00004000 /* If DCA, requests park */
+#define QMAN_ENQUEUE_FLAG_DCA_PTR(p)		/* If DCA, p is DQRR entry */ \
+		(((u32)(p) << 2) & 0x00000f00)
+#define QMAN_ENQUEUE_FLAG_C_GREEN    0x00000000 /* choose one C_*** flag */
+#define QMAN_ENQUEUE_FLAG_C_YELLOW   0x00000008
+#define QMAN_ENQUEUE_FLAG_C_RED	     0x00000010
+#define QMAN_ENQUEUE_FLAG_C_OVERRIDE 0x00000018
+/* For the ORP-specific qman_enqueue_orp() variant;
+ * - this flag indicates "Not Last In Sequence", ie. all but the final fragment
+ *   of a frame. */
+#define QMAN_ENQUEUE_FLAG_NLIS	     0x01000000
+/* - this flag performs no enqueue but fills in an ORP sequence number that
+ *   would otherwise block it (eg. if a frame has been dropped). */
+#define QMAN_ENQUEUE_FLAG_HOLE	     0x02000000
+/* - this flag performs no enqueue but advances NESN to the given sequence
+ *   number. */
+#define QMAN_ENQUEUE_FLAG_NESN	     0x04000000
+
+/* Flags to qman_modify_cgr() */
+#define QMAN_CGR_FLAG_USE_INIT	     0x00000001
+#define QMAN_CGR_MODE_FRAME	     0x00000001
+
+	/* Portal Management */
+	/* ----------------- */
+/**
+ * qman_get_portal_config - get portal configuration settings
+ *
+ * This returns a read-only view of the current cpu's affine portal settings.
+ */
+const struct qman_portal_config *qman_get_portal_config(void);
+
+/**
+ * qman_irqsource_get - return the portal work that is interrupt-driven
+ *
+ * Returns a bitmask of QM_PIRQ_**I processing sources that are currently
+ * enabled for interrupt handling on the current cpu's affine portal. These
+ * sources will trigger the portal interrupt and the interrupt handler (or a
+ * tasklet/bottom-half it defers to) will perform the corresponding processing
+ * work. The qman_poll_***() functions will only process sources that are not in
+ * this bitmask. If the current CPU is sharing a portal hosted on another CPU,
+ * this always returns zero.
+ */
+u32 qman_irqsource_get(void);
+
+/**
+ * qman_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions). Returns zero for success, or
+ * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_add(u32 bits);
+
+/**
+ * qman_irqsource_remove - remove processing sources from being interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions. Returns zero for success,
+ * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
+ */
+int qman_irqsource_remove(u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ */
+void *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * Ie. the value returned by qman_irqsource_get() should not include
+ * QM_PIRQ_DQRI. If the current CPU is sharing a portal hosted on another CPU,
+ * this function will return -EINVAL, otherwise the return value is >=0 and
+ * represents the number of DQRR entries processed.
+ */
+int qman_poll_dqrr(unsigned int limit);
+
+/**
+ * qman_poll_slow - process anything (except DQRR) that isn't interrupt-driven.
+ *
+ * This function does any portal processing that isn't interrupt-driven. If the
+ * current CPU is sharing a portal hosted on another CPU, this function will
+ * return (u32)-1, otherwise the return value is a bitmask of QM_PIRQ_* sources
+ * indicating what interrupt sources were actually processed by the call.
+ */
+u32 qman_poll_slow(void);
+
+/**
+ * qman_poll - legacy wrapper for qman_poll_dqrr() and qman_poll_slow()
+ *
+ * Dispatcher logic on a cpu can use this to trigger any maintenance of the
+ * affine portal. There are two classes of portal processing in question;
+ * fast-path (which involves demuxing dequeue ring (DQRR) entries and tracking
+ * enqueue ring (EQCR) consumption), and slow-path (which involves EQCR
+ * thresholds, congestion state changes, etc). This function does whatever
+ * processing is not triggered by interrupts.
+ *
+ * Note, if DQRR and some slow-path processing are poll-driven (rather than
+ * interrupt-driven) then this function uses a heuristic to determine how often
+ * to run slow-path processing - as slow-path processing introduces at least a
+ * minimum latency each time it is run, whereas fast-path (DQRR) processing is
+ * close to zero-cost if there is no work to be done. Applications can tune this
+ * behaviour themselves by using qman_poll_dqrr() and qman_poll_slow() directly
+ * rather than going via this wrapper.
+ */
+void qman_poll(void);
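+
+/* A minimal polling-loop sketch for a portal with no interrupt-driven sources
+ * (ie. qman_irqsource_get() returns zero); 'should_stop()' is a placeholder
+ * for the caller's own termination condition;
+ *
+ *     while (!should_stop()) {
+ *         qman_poll_dqrr(16);     // fast-path, bounded batch of DQRR entries
+ *         qman_poll_slow();       // slow-path (EQCR thresholds, CGR changes)
+ *     }
+ *
+ * or simply call qman_poll() in the loop and let the driver decide how often
+ * to run the slow-path processing.
+ */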
+
+/**
+ * qman_stop_dequeues - Stop h/w dequeuing to the s/w portal
+ *
+ * Disables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_stop_dequeues(void);
+
+/**
+ * qman_start_dequeues - (Re)start h/w dequeuing to the s/w portal
+ *
+ * Enables DQRR processing of the portal. This is reference-counted, so
+ * qman_start_dequeues() must be called as many times as qman_stop_dequeues() to
+ * truly re-enable dequeuing.
+ */
+void qman_start_dequeues(void);
+
+/**
+ * qman_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_static_dequeue_add(u32 pools);
+
+/**
+ * qman_static_dequeue_del - Remove pool channels from the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Removes a set of pool channels from the portal's static dequeue command
+ * register (SDQCR). The requested pools are limited to those the portal has
+ * dequeue access to.
+ */
+void qman_static_dequeue_del(u32 pools);
+
+/**
+ * qman_static_dequeue_get - return the portal's current SDQCR
+ *
+ * Returns the portal's current static dequeue command register (SDQCR). The
+ * entire register is returned, so if only the currently-enabled pool channels
+ * are desired, mask the return value with QM_SDQCR_CHANNELS_POOL_MASK.
+ */
+u32 qman_static_dequeue_get(void);
+
+/**
+ * qman_dca - Perform a Discrete Consumption Acknowledgment
+ * @dq: the DQRR entry to be consumed
+ * @park_request: indicates whether the held-active FQ should be parked
+ *
+ * Only allowed in DCA-mode portals, for DQRR entries whose handler callback had
+ * previously returned 'qman_cb_dqrr_defer'. NB, as with the other APIs, this
+ * does not take a 'portal' argument but implies the core affine portal from the
+ * cpu that is currently executing the function. For reasons of locking, this
+ * function must be called from the same CPU as that which processed the DQRR
+ * entry in the first place.
+ */
+void qman_dca(struct qm_dqrr_entry *dq, int park_request);
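+
+/* A sketch of deferred consumption on a DCA-mode portal; the DQRR callback
+ * returns qman_cb_dqrr_defer and the saved entry is acknowledged later, from
+ * the same cpu, once the frame has been dealt with. 'stash_entry()' and
+ * 'finish_frame()' are placeholders;
+ *
+ *     static enum qman_cb_dqrr_result deferring_cb(struct qman_portal *qm,
+ *                                     struct qman_fq *fq,
+ *                                     const struct qm_dqrr_entry *dqrr)
+ *     {
+ *         stash_entry(fq, dqrr);
+ *         return qman_cb_dqrr_defer;
+ *     }
+ *
+ *     // later, on the same cpu, with 'dq' being the stashed entry;
+ *     finish_frame(dq);
+ *     qman_dca(dq, 0);            // consume, no park request
+ */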
+
+/**
+ * qman_eqcr_is_empty - Determine if portal's EQCR is empty
+ *
+ * For use in situations where a cpu-affine caller needs to determine when all
+ * enqueues for the local portal have been processed by QMan but can't use the
+ * QMAN_ENQUEUE_FLAG_WAIT_SYNC flag to do this from the final qman_enqueue().
+ * The function forces tracking of EQCR consumption (which normally doesn't
+ * happen until enqueue processing needs to find space to put new enqueue
+ * commands), and returns zero if the ring still has unprocessed entries,
+ * non-zero if it is empty.
+ */
+int qman_eqcr_is_empty(void);
+
+/**
+ * qman_set_dc_ern - Set the handler for DCP enqueue rejection notifications
+ * @handler: callback for processing DCP ERNs
+ * @affine: whether this handler is specific to the locally affine portal
+ *
+ * If a hardware block's interface to QMan (ie. its direct-connect portal, or
+ * DCP) is configured not to receive enqueue rejections, then any enqueues
+ * through that DCP that are rejected will be sent to a given software portal.
+ * If @affine is non-zero, then this handler will only be used for DCP ERNs
+ * received on the portal affine to the current CPU. If multiple CPUs share a
+ * portal and they all call this function, they will be setting the handler for
+ * the same portal! If @affine is zero, then this handler will be global to all
+ * portals handled by this instance of the driver. Only those portals that do
+ * not have their own affine handler will use the global handler.
+ */
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
+
+	/* FQ management */
+	/* ------------- */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qman_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller. If the AS_IS flag is specified, management commands
+ * will be used to query state for frame queue @fqid and construct
+ * a frame queue object based on that, rather than assuming/requiring that it be
+ * Out of Service.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
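+
+/* A creation sketch using a dynamically allocated FQID; the callback names
+ * are placeholders and error handling is elided;
+ *
+ *     struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);
+ *
+ *     fq->cb.dqrr = my_dqrr_cb;   // dequeued frames
+ *     fq->cb.ern = my_ern_cb;     // enqueue rejections
+ *     fq->cb.fqs = my_fqs_cb;     // FQ state changes
+ *     if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
+ *         goto fail;
+ */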
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ * @flags: bit-mask of QMAN_FQ_FREE_*** options
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' state unless the QMAN_FQ_FREE_PARKED flag
+ * is specified, in which case it may also be in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq, u32 flags);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_fq_state - Queries the state of a FQ object
+ * @fq: the frame queue object to query
+ * @state: pointer to state enum to return the FQ scheduling state
+ * @flags: pointer to state flags to receive QMAN_FQ_STATE_*** bitmask
+ *
+ * Queries the state of the FQ object, without performing any h/w commands.
+ * This captures the state, as seen by the driver, at the time the function
+ * executes.
+ */
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ *     initialised to a value used by the driver for demux.
+ *   - if context_b is initialised for demux, so is context_a in case stashing
+ *     is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
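+
+/* A minimal sketch: schedule the FQ towards the local portal with default
+ * settings (per note 3 above, the LOCAL flag makes the driver fill in the
+ * destination channel and a default workqueue, so @opts can be NULL);
+ *
+ *     err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL,
+ *                        NULL);
+ */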
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked, which takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (as per qman_fq_state) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
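+
+/* A teardown sketch: retire, wait for retirement to complete if it was
+ * asynchronous, take the FQ out of service, then release the object. The
+ * waiting step is schematic - in practice completion is signalled by the FQRN
+ * message arriving via the FQ's 'fqs' callback;
+ *
+ *     err = qman_retire_fq(fq, &flags);
+ *     if (err > 0)
+ *         wait_for_fqrn(fq);      // placeholder for caller-specific waiting
+ *     qman_oos_fq(fq);
+ *     qman_destroy_fq(fq, 0);
+ */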
+
+/**
+ * qman_fq_flow_control - Set the XON/XOFF state of a FQ
+ * @fq: the frame queue object to be set to XON/XOFF state, must not be in the
+ * 'oos', 'retired' or 'parked' state
+ * @xon: boolean to set the fq in the XON or XOFF state
+ *
+ * The frame queue should be in the Tentatively Scheduled or Truly Scheduled
+ * state, otherwise the IFSI interrupt will be asserted.
+ */
+int qman_fq_flow_control(struct qman_fq *fq, int xon);
+
+/**
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
+/**
+ * qman_query_wq - Queries work queue lengths
+ * @query_dedicated: If non-zero, query length of WQs in the channel dedicated
+ *		to this software portal. Otherwise, query length of WQs in the
+ *		channel specified in @wq.
+ * @wq: storage for the queried WQ lengths. Also specifies the channel to
+ *	query if query_dedicated is zero.
+ */
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
+
+/**
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (ie. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" field of the "struct qm_dqrr_entry" passed to the FQ's dequeue
+ * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
+ * "flags" retrieved from qman_fq_state().
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
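+
+/* A volatile-dequeue sketch. The VDQCR encoding helpers (frame count, etc)
+ * are defined earlier in this header alongside qm_dqrr_vdqcr_set(); the
+ * 'vdqcr_numframes(3)' expression below is only indicative of such an
+ * encoding, not a real helper name;
+ *
+ *     err = qman_volatile_dequeue(fq,
+ *                 QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ *                 vdqcr_numframes(3));
+ *     // on return, all resulting DQRR entries have been passed to cb.dqrr
+ */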
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry, the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full
+ * (and FLAG_WAIT not specified), congestion avoidance (FLAG_WATCH_CGR
+ * specified), etc. If the ring is full and FLAG_WAIT is specified, this
+ * function will block. If FLAG_INTERRUPT is set, the EQCI bit of the portal
+ * interrupt will assert when QMan consumes the EQCR entry (subject to "status
+ * disable", "enable", and "inhibit" registers). If FLAG_DCA is set, QMan will
+ * perform an implied "discrete consumption acknowledgment" on the dequeue
+ * ring's (DQRR) entry, specified via the FLAG_DCA_PTR(p) macro. (As an
+ * alternative to issuing explicit DCA actions on DQRR entries,
+ * this implicit DCA can delay the release of a "held active" frame queue
+ * corresponding to a DQRR entry until QMan consumes the EQCR entry - providing
+ * order-preservation semantics in packet-forwarding scenarios.) If FLAG_DCA is
+ * set, then FLAG_DCA_PARK can also be set to imply that the DQRR consumption
+ * acknowledgment should "park request" the "held active" frame queue. Ie.
+ * when the portal eventually releases that frame queue, it will be left in the
+ * Parked state rather than Tentatively Scheduled or Truly Scheduled. If the
+ * portal is watching congestion groups, the QMAN_ENQUEUE_FLAG_WATCH_CGR flag
+ * is requested, and the FQ is a member of a congestion group, then this
+ * function returns -EAGAIN if the congestion group is currently congested.
+ * Note, this does not eliminate ERNs, as the async interface means we can be
+ * sending enqueue commands to an un-congested FQ that becomes congested before
+ * the enqueue commands are processed, but it does minimise needless thrashing
+ * of an already busy hardware resource by throttling many of the to-be-dropped
+ * enqueues "at the source".
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
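+
+/* An enqueue sketch. Filling in the frame descriptor uses the qm_fd
+ * definitions earlier in this header and is elided here;
+ *
+ *     struct qm_fd fd;
+ *
+ *     // ... populate 'fd' (buffer address, format, length, etc) ...
+ *     err = qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WATCH_CGR);
+ *     if (err == -EAGAIN)
+ *         drop_or_retry();        // the FQ's congestion group is congested
+ */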
+
+typedef int (*qman_cb_precommit) (void *arg);
+/**
+ * qman_enqueue_precommit - Enqueue a frame to a frame queue and call cb
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @cb: user supplied callback function to invoke before writing commit verb.
+ * @cb_arg: callback function argument
+ *
+ * This is similar to qman_enqueue() except that it will invoke a user-supplied
+ * callback function just before writing the commit verb. This is useful
+ * when the user wants to do something *just before* enqueuing the request and
+ * the enqueue can't fail.
+ */
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+		u32 flags, qman_cb_precommit cb, void *cb_arg);
+
+/**
+ * qman_enqueue_orp - Enqueue a frame to a frame queue using an ORP
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ * @flags: bit-mask of QMAN_ENQUEUE_FLAG_*** options
+ * @orp: the frame queue object used as an order restoration point.
+ * @orp_seqnum: the sequence number of this frame in the order restoration path
+ *
+ * Similar to qman_enqueue(), but with the addition of an Order Restoration
+ * Point (@orp) and corresponding sequence number (@orp_seqnum) for this
+ * enqueue operation to employ order restoration. Each frame queue object acts
+ * as an Order Definition Point (ODP) by providing each frame dequeued from it
+ * with an incrementing sequence number; this value is generally ignored unless
+ * that sequence of dequeued frames will need order restoration later. Each
+ * frame queue object also encapsulates an Order Restoration Point (ORP), which
+ * is a re-assembly context for re-ordering frames relative to their sequence
+ * numbers as they are enqueued. The ORP does not have to be within the frame
+ * queue that receives the enqueued frame, in fact it is usually the frame
+ * queue from which the frames were originally dequeued. For the purposes of
+ * order restoration, multiple frames (or "fragments") can be enqueued for a
+ * single sequence number by setting the QMAN_ENQUEUE_FLAG_NLIS flag for all
+ * enqueues except the final fragment of a given sequence number. Ordering
+ * between sequence numbers is guaranteed, even if fragments of different
+ * sequence numbers are interlaced with one another. Fragments of the same
+ * sequence number will retain the order in which they are enqueued. If no
+ * enqueue is to be performed, QMAN_ENQUEUE_FLAG_HOLE indicates that the given
+ * sequence number is to be "skipped" by the ORP logic (eg. if a frame has been
+ * dropped from a sequence), or QMAN_ENQUEUE_FLAG_NESN indicates that the given
+ * sequence number should become the ORP's "Next Expected Sequence Number".
+ *
+ * Side note: a frame queue object can be used purely as an ORP, without
+ * carrying any frames at all. Care should be taken not to deallocate a frame
+ * queue object that is being actively used as an ORP, as a future allocation
+ * of the frame queue object may start using the internal ORP before the
+ * previous use has finished.
+ */
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+			struct qman_fq *orp, u16 orp_seqnum);
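+
+/* An order-restoration sketch: frames dequeued from 'odp_fq' are re-enqueued
+ * to 'dest_fq' through 'odp_fq' acting as the ORP, using the sequence number
+ * assigned at dequeue time. 'seqnum_of()' is a placeholder for however the
+ * caller retrieves that number from the DQRR entry; multi-fragment sequences
+ * would additionally set QMAN_ENQUEUE_FLAG_NLIS on all but the last fragment;
+ *
+ *     err = qman_enqueue_orp(dest_fq, &fd, 0, odp_fq, seqnum_of(dqrr));
+ */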
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count FQIDs
+ *
+ * Returns the number of frame queues allocated, or a negative error code. If
+ * @partial is non zero, the allocation request may return a smaller range of
+ * FQs than requested (though alignment will be as requested). If @partial is
+ * zero, the return value will either be 'count' or negative.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_fqid(u32 *result)
+{
+	int ret = qman_alloc_fqid_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_fqid_range - Release the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to deallocate
+ * @count: the number of FQIDs in the range
+ *
+ * This function can also be used to seed the allocator with ranges of FQIDs
+ * that it can subsequently allocate from.
+ */
+void qman_release_fqid_range(u32 fqid, unsigned int count);
+static inline void qman_release_fqid(u32 fqid)
+{
+	qman_release_fqid_range(fqid, 1);
+}
+
+void qman_seed_fqid_range(u32 fqid, unsigned int count);
+
+
+int qman_shutdown_fq(u32 fqid);
+
+/**
+ * qman_reserve_fqid_range - Reserve the specified range of frame queue IDs
+ * @fqid: the base FQID of the range to reserve
+ * @count: the number of FQIDs in the range
+ */
+int qman_reserve_fqid_range(u32 fqid, unsigned int count);
+static inline int qman_reserve_fqid(u32 fqid)
+{
+	return qman_reserve_fqid_range(fqid, 1);
+}
+
+	/* Pool-channel management */
+	/* ----------------------- */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of pool-channel IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_pool(u32 *result)
+{
+	int ret = qman_alloc_pool_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_pool_range - Release the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to deallocate
+ * @count: the number of pool-channel IDs in the range
+ */
+void qman_release_pool_range(u32 id, unsigned int count);
+static inline void qman_release_pool(u32 id)
+{
+	qman_release_pool_range(id, 1);
+}
+
+/**
+ * qman_reserve_pool_range - Reserve the specified range of pool-channel IDs
+ * @id: the base pool-channel ID of the range to reserve
+ * @count: the number of pool-channel IDs in the range
+ */
+int qman_reserve_pool_range(u32 id, unsigned int count);
+static inline int qman_reserve_pool(u32 id)
+{
+	return qman_reserve_pool_range(id, 1);
+}
+
+void qman_seed_pool_range(u32 id, unsigned int count);
+
+	/* CGR management */
+	/* -------------- */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts);
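+
+/* A registration sketch. The callback and CGR ID are set in the object before
+ * the call; the 'cscn_en' field used below is an assumption about the
+ * __qm_mc_cgr layout defined earlier, shown only to illustrate pairing the
+ * we_mask with QMAN_CGR_FLAG_USE_INIT;
+ *
+ *     struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cgr_cb };
+ *     struct qm_mcc_initcgr opts = { .we_mask = QM_CGR_WE_CSCN_EN };
+ *
+ *     opts.cgr.cscn_en = 1;       // assumed field name, see note above
+ *     err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */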
+
+/**
+ * qman_create_cgr_to_dcp - Register a congestion group object to DCP portal
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @dcp_portal: the DCP portal to which the cgr object is registered.
+ * @opts: optional state of CGR settings
+ *
+ * Similar to qman_create_cgr(), but the congestion group is registered
+ * against the direct-connect portal @dcp_portal rather than the software
+ * portal affine to the current cpu.
+ */
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+				struct qm_mcc_initcgr *opts);
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_modify_cgr - Modify CGR fields
+ * @cgr: the 'cgr' object to modify
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: the CGR-modification settings
+ *
+ * The @opts parameter comes from the low-level portal API, and can be NULL.
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver, in particular the 'cgrid' field is ignored (this operation
+ * only affects the given CGR object). If @flags contains
+ * QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset any
+ * unspecified parameters) will be used rather than a modify hw command (which
+ * only modifies the specified parameters).
+ */
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+			struct qm_mcc_initcgr *opts);
+
+/**
+* qman_query_cgr - Queries CGR fields
+* @cgr: the 'cgr' object to query
+* @result: storage for the queried congestion group record
+*/
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *result);
+
+/**
+ * qman_query_congestion - Queries the state of all congestion groups
+ * @congestion: storage for the queried state of all congestion groups
+ */
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ * @align: required alignment of the allocated range
+ * @partial: non-zero if the API can return fewer than @count
+ *
+ * Returns the number of CGR IDs allocated, or a negative error code.
+ * If @partial is non zero, the allocation request may return a smaller range
+ * than requested (though alignment will be as requested). If @partial is zero,
+ * the return value will either be 'count' or negative.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
+static inline int qman_alloc_cgrid(u32 *result)
+{
+	int ret = qman_alloc_cgrid_range(result, 1, 0, 0);
+
+	return (ret > 0) ? 0 : ret;
+}
+
+/**
+ * qman_release_cgrid_range - Release the specified range of CGR IDs
+ * @id: the base CGR ID of the range to deallocate
+ * @count: the number of CGR IDs in the range
+ */
+void qman_release_cgrid_range(u32 id, unsigned int count);
+static inline void qman_release_cgrid(u32 id)
+{
+	qman_release_cgrid_range(id, 1);
+}
+
+/**
+ * qman_reserve_cgrid_range - Reserve the specified range of CGR ID
+ * @id: the base CGR ID of the range to reserve
+ * @count: the number of CGR IDs in the range
+ */
+int qman_reserve_cgrid_range(u32 id, unsigned int count);
+static inline int qman_reserve_cgrid(u32 id)
+{
+	return qman_reserve_cgrid_range(id, 1);
+}
+
+void qman_seed_cgrid_range(u32 id, unsigned int count);
+
+
+	/* Helpers */
+	/* ------- */
+/**
+ * qman_poll_fq_for_init - Check if an FQ has been initialised from OOS
+ * @fqid: the FQID that will be initialised by other s/w
+ *
+ * In many situations, a FQID is provided for communication between s/w
+ * entities, and whilst the consumer is responsible for initialising and
+ * scheduling the FQ, the producer(s) generally create a wrapper FQ object and
+ * only call qman_enqueue() (no FQ initialisation, scheduling, etc). Ie;
+ *     qman_create_fq(..., QMAN_FQ_FLAG_NO_MODIFY, ...);
+ * However, data can not be enqueued to the FQ until it is initialised out of
+ * the OOS state - this function polls for that condition. It is particularly
+ * useful for users of IPC functions - each endpoint's Rx FQ is the other
+ * endpoint's Tx FQ, so each side can initialise and schedule their Rx FQ object
+ * and then use this API on the (NO_MODIFY) Tx FQ object in order to
+ * synchronise. The function returns zero for success, +1 if the FQ is still in
+ * the OOS state, or negative if there was an error.
+ */
+static inline int qman_poll_fq_for_init(struct qman_fq *fq)
+{
+	struct qm_mcr_queryfq_np np;
+	int err;
+
+	err = qman_query_fq_np(fq, &np);
+	if (err)
+		return err;
+	if ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS)
+		return 1;
+	return 0;
+}
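+
+/* A usage sketch for the helper above: the producer side polls (sleeping
+ * between attempts) until the consumer has taken the shared FQ out of the
+ * OOS state;
+ *
+ *     do {
+ *         err = qman_poll_fq_for_init(tx_fq);
+ *         if (err < 0)
+ *             return err;
+ *         if (err)
+ *             msleep(1);
+ *     } while (err);
+ */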
+
+/**
+ * qman_set_wpm - Set waterfall power management
+ *
+ * @wpm_enable: boolean, 1 = enable wpm, 0 = disable wpm.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_set_wpm(int wpm_enable);
+
+/**
+ * qman_get_wpm - Query the waterfall power management setting
+ *
+ * @wpm_enable: pointer to the returned setting, 1 = wpm enabled, 0 = disabled.
+ *
+ * Return 0 for success, return -ENODEV if QMan misc_cfg register is not
+ * accessible.
+ */
+int qman_get_wpm(int *wpm_enable);
+
+/* The below qman_p_***() variants might be called in a migration situation
+ * (e.g. cpu hotplug). They are used to continue accessing the portal that
+ * execution was affine to prior to migration.
+ * @qman_portal specifies which portal the APIs will use.
+*/
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal
+									 *p);
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+u32 qman_p_poll_slow(struct qman_portal *p);
+void qman_p_poll(struct qman_portal *p);
+void qman_p_stop_dequeues(struct qman_portal *p);
+void qman_p_start_dequeues(struct qman_portal *p);
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools);
+u32 qman_p_static_dequeue_get(struct qman_portal *p);
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+						int park_request);
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+				u32 flags __maybe_unused, u32 vdqcr);
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+					const struct qm_fd *fd, u32 flags);
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				struct qman_fq *orp, u16 orp_seqnum);
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+				const struct qm_fd *fd, u32 flags,
+				qman_cb_precommit cb, void *cb_arg);
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* __FSL_QMAN_H */
-- 
1.7.9.5


