[Skiboot] [RFC PATCH 4/6] core/flash: Make opal_flash_op() actually asynchronous
Stewart Smith
stewart at linux.ibm.com
Thu Feb 28 17:18:24 AEDT 2019
From: Cyril Bur <cyril.bur at au1.ibm.com>
This patch provides a simple (although not particularly efficient)
asynchronous capability to the opal_flash interface. The advantage of
this approach is that it doesn't require any changing of blocklevel or
its backends to provide an asynchronous implementation. This is also the
disadvantage of this implementation, as all it actually does is break the
work up into chunks that can be performed quickly, but still
synchronously. Only a backend could provide a truly asynchronous
implementation.
This solves a problem we have right now with the opal_flash_erase call
where it can block in Skiboot for around three minutes. This causes a
variety of problems in Linux due to a processor being gone for a long
time.
For example:
[ 98.610043] INFO: rcu_sched detected stalls on CPUs/tasks:
[ 98.610050] 113-...: (1 GPs behind) idle=96f/140000000000000/0 softirq=527/528 fqs=1044
[ 98.610051] (detected by 112, t=2102 jiffies, g=223, c=222, q=123)
[ 98.610060] Task dump for CPU 113:
[ 98.610062] pflash R running task 0 3335 3333 0x00040004
[ 98.610066] Call Trace:
[ 98.610070] [c000001fdd847730] [0000000000000001] 0x1 (unreliable)
[ 98.610076] [c000001fdd847900] [c000000000013854] __switch_to+0x1e8/0x1f4
[ 98.610081] [c000001fdd847960] [c0000000006122c4] __schedule+0x32c/0x874
[ 98.610083] [c000001fdd847a30] [c000001fdd847b40] 0xc000001fdd847b40
It is for this reason that breaking the work up into smaller chunks solves
this problem, as Skiboot can return the CPU to Linux between chunks to
avoid Linux getting upset.
Reported-By: Samuel Mendoza-Jonas <sam at mendozajonas.com>
Signed-off-by: Cyril Bur <cyril.bur at au1.ibm.com>
[stewart: PR_TRACE not printf, operate on 1 block at a time]
Signed-off-by: Stewart Smith <stewart at linux.ibm.com>
---
NOTE: As mentioned in cover letter, when used with hiomap we get a bunch
of recursive poller warnings.
---
core/flash.c | 126 ++++++++++++++++++++----
core/timer.c | 7 ++
doc/opal-api/opal-flash-110-111-112.rst | 4 +
3 files changed, 120 insertions(+), 17 deletions(-)
diff --git a/core/flash.c b/core/flash.c
index 69d157afc16d..04d99348515e 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -30,6 +30,23 @@
#include <libstb/trustedboot.h>
#include <libxz/xz.h>
#include <elf.h>
+#include <timer.h>
+#include <timebase.h>
+
+enum flash_op {
+ FLASH_OP_READ,
+ FLASH_OP_WRITE,
+ FLASH_OP_ERASE,
+};
+
+struct flash_async_info {
+ enum flash_op op;
+ struct timer poller;
+ uint64_t token;
+ uint64_t pos;
+ uint64_t len;
+ uint64_t buf;
+};
struct flash {
struct list_node list;
@@ -39,6 +56,7 @@ struct flash {
uint64_t size;
uint32_t block_size;
int id;
+ struct flash_async_info async;
};
static LIST_HEAD(flashes);
@@ -214,6 +232,62 @@ static int flash_nvram_probe(struct flash *flash, struct ffs_handle *ffs)
/* core flash support */
+/*
+ * Called with flash lock held, drop it on async completion
+ */
+static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unused)
+{
+ struct flash *flash = data;
+ uint64_t offset, buf, len;
+ int rc;
+
+ offset = flash->async.pos;
+ buf = flash->async.buf;
+ /*
+ * LPC is around 1.75MB/sec.
+ * This means we have around 28x64kb blocks/sec.
+ * So, currently we force 1 block maximum per cycle through OPAL.
+ * This means one 64k block every 36ms, which is a *long* time in OPAL.
+ * For 4k blocks, it's every 2.2ms, which is better (but still not
+ * great).
+ */
+ len = MIN(flash->async.len, flash->block_size);
+ prlog(PR_TRACE, "Flash poll op %d len %llu\n", flash->async.op, len);
+
+ switch (flash->async.op) {
+ case FLASH_OP_READ:
+ rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, len);
+ break;
+ case FLASH_OP_WRITE:
+ rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, len);
+ break;
+ case FLASH_OP_ERASE:
+ rc = blocklevel_erase(flash->bl, offset, len);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (rc)
+ rc = OPAL_HARDWARE;
+
+ flash->async.pos += len;
+ flash->async.buf += len;
+ flash->async.len -= len;
+ if (!rc && flash->async.len) {
+ /*
+ * We want to get called pretty much straight away, we just have
+ * to be sure that we jump back out to Linux so that if this
+ * takes a very long time we don't cause RCU or the scheduler to freak
+ */
+ schedule_timer(&flash->async.poller, 0);
+ return;
+ }
+
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
+ flash_release(flash);
+}
+
static struct dt_node *flash_add_dt_node(struct flash *flash, int id)
{
struct dt_node *flash_node;
@@ -312,6 +386,7 @@ int flash_register(struct blocklevel_device *bl)
flash->size = size;
flash->block_size = block_size;
flash->id = num_flashes();
+ init_timer(&flash->async.poller, flash_poll, flash);
rc = ffs_init(0, flash->size, bl, &ffs, 1);
if (rc) {
@@ -340,16 +415,11 @@ int flash_register(struct blocklevel_device *bl)
return OPAL_SUCCESS;
}
-enum flash_op {
- FLASH_OP_READ,
- FLASH_OP_WRITE,
- FLASH_OP_ERASE,
-};
-
static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
uint64_t buf, uint64_t size, uint64_t token)
{
struct flash *flash = NULL;
+ uint64_t len;
int rc;
list_for_each(&flashes, flash, list)
@@ -368,9 +438,17 @@ static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
prlog(PR_DEBUG, "Requested flash op %d beyond flash size %" PRIu64 "\n",
op, flash->size);
rc = OPAL_PARAMETER;
- goto err;
+ goto out;
}
+ len = MIN(size, flash->block_size*10);
+ prlog(PR_TRACE, "Flash op %d len %llu\n", op, len);
+ flash->async.op = op;
+ flash->async.token = token;
+ flash->async.buf = buf + len;
+ flash->async.len = size - len;
+ flash->async.pos = offset + len;
+
/*
* These ops intentionally have no smarts (ecc correction or erase
* before write) to them.
@@ -380,29 +458,43 @@ static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
*/
switch (op) {
case FLASH_OP_READ:
- rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, size);
+ rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, len);
break;
case FLASH_OP_WRITE:
- rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, size);
+ rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, len);
break;
case FLASH_OP_ERASE:
- rc = blocklevel_erase(flash->bl, offset, size);
+ rc = blocklevel_erase(flash->bl, offset, len);
break;
default:
assert(0);
}
if (rc) {
+ prlog(PR_ERR, "%s: Op %d failed with rc %d\n", __func__, op, rc);
rc = OPAL_HARDWARE;
- goto err;
+ goto out;
}
- flash_release(flash);
-
- opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc);
- return OPAL_ASYNC_COMPLETION;
-
-err:
+ if (size - len) {
+ /* Work remains */
+ schedule_timer(&flash->async.poller, 0);
+ /* Don't release the flash */
+ return OPAL_ASYNC_COMPLETION;
+ } else {
+ /*
+ * As tempting as it might be to return OPAL_SUCCESS
+ * here, don't! As of 1/07/2017 the powernv_flash driver in
+ * Linux will handle OPAL_SUCCESS as an error, the only thing
+ * that makes it handle things as though they're working is
+ * receiving OPAL_ASYNC_COMPLETION.
+ *
+ * XXX TODO: Revisit this in a few years *sigh*
+ */
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
+ }
+ rc = OPAL_ASYNC_COMPLETION;
+out:
flash_release(flash);
return rc;
}
diff --git a/core/timer.c b/core/timer.c
index 1c539517839e..c68f978ccb07 100644
--- a/core/timer.c
+++ b/core/timer.c
@@ -199,6 +199,7 @@ static void __check_poll_timers(uint64_t now)
static void __check_timers(uint64_t now)
{
struct timer *t;
+ uint64_t stop = now + msecs_to_tb(50); /* Run timers for max 50ms */
for (;;) {
t = list_top(&timer_list, struct timer, link);
@@ -229,6 +230,12 @@ static void __check_timers(uint64_t now)
/* Update time stamp */
now = mftb();
+
+ /* Only run timers for a limited time to avoid jitter */
+ if (now > stop) {
+ prlog(PR_PRINTF, "Run timers for > 50ms\n");
+ break;
+ }
}
}
diff --git a/doc/opal-api/opal-flash-110-111-112.rst b/doc/opal-api/opal-flash-110-111-112.rst
index e05bd2db504b..c0fb7476ae7b 100644
--- a/doc/opal-api/opal-flash-110-111-112.rst
+++ b/doc/opal-api/opal-flash-110-111-112.rst
@@ -20,6 +20,10 @@ success, the calls will return ``OPAL_ASYNC_COMPLETION``, and an
opal_async_completion message will be sent (with the appropriate token
argument) when the operation completes.
+Due to an error in the powernv_flash driver in Linux these three OPAL
+calls should never return ``OPAL_SUCCESS`` as the driver is likely to
+treat this return value as an error.
+
All calls share the same return values:
``OPAL_ASYNC_COMPLETION``
--
2.20.1
More information about the Skiboot
mailing list