[Skiboot] [PATCH v3 18/29] core/lock: Introduce atomic cmpxchg and implement try_lock with it
Nicholas Piggin
npiggin at gmail.com
Wed Nov 29 16:36:56 AEDT 2017
cmpxchg will be used in a subsequent change, and implementing try_lock
with it reduces the amount of asm code.
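For reference, the new try_lock boils down to a single compare-and-swap
of the lock word from 0 to ((PIR << 32) | 1). A minimal portable sketch
of the same idea, using the GCC __atomic_compare_exchange_n builtin in
place of the hand-rolled ldarx/stdcx. loop below (demo_lock and
demo_try_lock are illustrative names, not skiboot's):

	#include <stdint.h>
	#include <stdbool.h>

	/* Bit 0 is the lock bit, owner PIR lives in the top 32 bits. */
	struct demo_lock {
		uint64_t lock_val;
	};

	static bool demo_try_lock(struct demo_lock *l, uint32_t pir)
	{
		uint64_t unlocked = 0;
		uint64_t owned = ((uint64_t)pir << 32) | 1;

		/* Succeeds only if the lock word was 0 (unlocked);
		 * acquire ordering stands in for the sync() issued
		 * after a successful cmpxchg in the real code. */
		return __atomic_compare_exchange_n(&l->lock_val, &unlocked,
						   owned, false,
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}

On failure the builtin writes the observed lock word back into
'unlocked', which the sketch ignores, matching try_lock's boolean
contract.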
Signed-off-by: Nicholas Piggin <npiggin at gmail.com>
---
asm/Makefile.inc | 2 +-
asm/lock.S | 43 -----------------------------------------
core/lock.c | 26 +++++++++++++++++++++----
include/lock.h | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--
4 files changed, 79 insertions(+), 50 deletions(-)
delete mode 100644 asm/lock.S
diff --git a/asm/Makefile.inc b/asm/Makefile.inc
index 2e678fd86..34e2b2883 100644
--- a/asm/Makefile.inc
+++ b/asm/Makefile.inc
@@ -1,7 +1,7 @@
# -*-Makefile-*-
SUBDIRS += asm
-ASM_OBJS = head.o lock.o misc.o kernel-wrapper.o rom_entry.o
+ASM_OBJS = head.o misc.o kernel-wrapper.o rom_entry.o
ASM=asm/built-in.o
# Add extra dependency to the kernel wrapper
diff --git a/asm/lock.S b/asm/lock.S
deleted file mode 100644
index ce28010fe..000000000
--- a/asm/lock.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <asm-utils.h>
-#include <asm-offsets.h>
-#include <processor.h>
-
- .section ".text","ax"
- .balign 0x10
-
- /* bool try_lock(struct lock *lock) */
-.global __try_lock
-__try_lock:
- ld %r0,0(%r3)
- andi. %r10,%r0,1
- bne 2f
- lwz %r9,CPUTHREAD_PIR(%r13)
-1: ldarx %r0,0,%r3
- andi. %r10,%r0,1
- bne- 2f
- ori %r0,%r0,1
- rldimi %r0,%r9,32,0
- stdcx. %r0,0,%r3
- bne 1b
- sync
- li %r3,-1
- blr
-2: li %r3,0
- blr
-
diff --git a/core/lock.c b/core/lock.c
index 0868f2ba2..916a02412 100644
--- a/core/lock.c
+++ b/core/lock.c
@@ -33,7 +33,7 @@ static void lock_error(struct lock *l, const char *reason, uint16_t err)
{
bust_locks = true;
- fprintf(stderr, "LOCK ERROR: %s @%p (state: 0x%016lx)\n",
+ fprintf(stderr, "LOCK ERROR: %s @%p (state: 0x%016llx)\n",
reason, l, l->lock_val);
op_display(OP_FATAL, OP_MOD_LOCK, err);
@@ -73,12 +73,30 @@ bool lock_held_by_me(struct lock *l)
return l->lock_val == ((pir64 << 32) | 1);
}
+static inline bool __try_lock(struct cpu_thread *cpu, struct lock *l)
+{
+ uint64_t val;
+
+ val = cpu->pir;
+ val <<= 32;
+ val |= 1;
+
+ barrier();
+ if (__cmpxchg64(&l->lock_val, 0, val) == 0) {
+ sync();
+ return true;
+ }
+ return false;
+}
+
bool try_lock(struct lock *l)
{
- if (__try_lock(l)) {
+ struct cpu_thread *cpu = this_cpu();
+
+ if (__try_lock(cpu, l)) {
if (l->in_con_path)
- this_cpu()->con_suspend++;
- this_cpu()->lock_depth++;
+ cpu->con_suspend++;
+ cpu->lock_depth++;
return true;
}
return false;
diff --git a/include/lock.h b/include/lock.h
index 0ac943dc9..1597f4224 100644
--- a/include/lock.h
+++ b/include/lock.h
@@ -18,12 +18,13 @@
#define __LOCK_H
#include <stdbool.h>
+#include <processor.h>
struct lock {
/* Lock value has bit 63 as lock bit and the PIR of the owner
* in the top 32-bit
*/
- unsigned long lock_val;
+ uint64_t lock_val;
/*
* Set to true if lock is involved in the console flush path
@@ -63,7 +64,60 @@ static inline void init_lock(struct lock *l)
*l = (struct lock)LOCK_UNLOCKED;
}
-extern bool __try_lock(struct lock *l);
+/*
+ * Bare cmpxchg, no barriers.
+ */
+static inline uint32_t __cmpxchg32(uint32_t *mem, uint32_t old, uint32_t new)
+{
+ uint32_t prev;
+
+ asm volatile(
+ "# __cmpxchg32 \n"
+ "1: lwarx %0,0,%2 \n"
+ " cmpw %0,%3 \n"
+ " bne- 2f \n"
+ " stwcx. %4,0,%2 \n"
+ " bne- 1b \n"
+ "2: \n"
+
+ : "=&r"(prev), "+m"(*mem)
+ : "r"(mem), "r"(old), "r"(new)
+ : "cr0");
+
+ return prev;
+}
+
+static inline uint64_t __cmpxchg64(uint64_t *mem, uint64_t old, uint64_t new)
+{
+ uint64_t prev;
+
+ asm volatile(
+ "# __cmpxchg64 \n"
+ "1: ldarx %0,0,%2 \n"
+ " cmpd %0,%3 \n"
+ " bne- 2f \n"
+ " stdcx. %4,0,%2 \n"
+ " bne- 1b \n"
+ "2: \n"
+
+ : "=&r"(prev), "+m"(*mem)
+ : "r"(mem), "r"(old), "r"(new)
+ : "cr0");
+
+ return prev;
+}
+
+static inline uint32_t cmpxchg32(uint32_t *mem, uint32_t old, uint32_t new)
+{
+ uint32_t prev;
+
+ sync();
+ prev = __cmpxchg32(mem, old, new);
+ sync();
+
+ return prev;
+}
+
extern bool try_lock(struct lock *l);
extern void lock(struct lock *l);
extern void unlock(struct lock *l);
--
2.15.0