[PATCH 4.4 4/9] net/ncsi: Rework request index allocation
Gavin Shan
gwshan at linux.vnet.ibm.com
Fri Oct 21 11:47:41 AEDT 2016
The NCSI request index (struct ncsi_req::nr_id) is put into the instance
ID (IID) field when sending an NCSI command packet. The design intent was
for the available IDs to be handed out in a round-robin fashion.
@ndp->request_id was introduced to represent the next available ID, but it
has been used as the number of successively allocated IDs, which breaks
the round-robin design. Besides, we shouldn't put 0 into an NCSI command
packet's IID field: ID#0 is reserved according to section 6.3.1.1 of the
NCSI spec (v1.1.0).
This fixes the above two issues. With it applied, the available IDs are
assigned in a round-robin fashion and ID#0 is never assigned.
Signed-off-by: Gavin Shan <gwshan at linux.vnet.ibm.com>
Reviewed-by: Joel Stanley <joel at jms.id.au>
---
net/ncsi/internal.h | 1 +
net/ncsi/ncsi-manage.c | 23 +++++++++++------------
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 339177a..212d77b 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -87,6 +87,7 @@ struct ncsi_dev_priv {
struct list_head ndp_packages;
atomic_t ndp_pending_reqs;
atomic_t ndp_last_req_idx;
+#define NCSI_REQ_START_IDX 1
spinlock_t ndp_req_lock;
struct ncsi_req ndp_reqs[256];
struct work_struct ndp_work;
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index b728614..44a740f 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -306,30 +306,29 @@ struct ncsi_req *ncsi_alloc_req(struct ncsi_dev_priv *ndp)
spin_lock_irqsave(&ndp->ndp_req_lock, flags);
/* Check if there is one available request until the ceiling */
- for (idx = atomic_read(&ndp->ndp_last_req_idx);
- !nr && idx < limit; idx++) {
+ for (idx = atomic_read(&ndp->ndp_last_req_idx); idx < limit; idx++) {
if (ndp->ndp_reqs[idx].nr_used)
continue;
- ndp->ndp_reqs[idx].nr_used = true;
nr = &ndp->ndp_reqs[idx];
- atomic_inc(&ndp->ndp_last_req_idx);
- if (atomic_read(&ndp->ndp_last_req_idx) >= limit)
- atomic_set(&ndp->ndp_last_req_idx, 0);
+ nr->nr_used = true;
+ atomic_set(&ndp->ndp_last_req_idx, idx + 1);
+ goto found;
}
/* Fail back to check from the starting cursor */
- for (idx = 0; !nr && idx < atomic_read(&ndp->ndp_last_req_idx); idx++) {
+ for (idx = NCSI_REQ_START_IDX;
+ idx < atomic_read(&ndp->ndp_last_req_idx); idx++) {
if (ndp->ndp_reqs[idx].nr_used)
continue;
- ndp->ndp_reqs[idx].nr_used = true;
nr = &ndp->ndp_reqs[idx];
- atomic_inc(&ndp->ndp_last_req_idx);
- if (atomic_read(&ndp->ndp_last_req_idx) >= limit)
- atomic_set(&ndp->ndp_last_req_idx, 0);
+ nr->nr_used = true;
+ atomic_set(&ndp->ndp_last_req_idx, idx + 1);
+ goto found;
}
+found:
spin_unlock_irqrestore(&ndp->ndp_req_lock, flags);
return nr;
}
@@ -834,7 +833,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
INIT_LIST_HEAD(&ndp->ndp_packages);
INIT_WORK(&ndp->ndp_work, ncsi_dev_work);
spin_lock_init(&ndp->ndp_req_lock);
- atomic_set(&ndp->ndp_last_req_idx, 0);
+ atomic_set(&ndp->ndp_last_req_idx, NCSI_REQ_START_IDX);
for (idx = 0; idx < 256; idx++) {
ndp->ndp_reqs[idx].nr_id = idx;
ndp->ndp_reqs[idx].nr_ndp = ndp;
--
2.1.0
More information about the openbmc
mailing list