author     Ian Lewis <ianmlewis@gmail.com>  2020-08-17 21:44:31 -0400
committer  Ian Lewis <ianmlewis@gmail.com>  2020-08-17 21:44:31 -0400
commit     ac324f646ee3cb7955b0b45a7453aeb9671cbdf1 (patch)
tree       0cbc5018e8807421d701d190dc20525726c7ca76 /pkg/atomicbitops/atomicbitops_arm64.s
parent     352ae1022ce19de28fc72e034cc469872ad79d06 (diff)
parent     6d0c5803d557d453f15ac6f683697eeb46dab680 (diff)
Merge branch 'master' into ip-forwarding
- Merges aleksej-paschenko's branch with HEAD
- Adds vfs2 support for ip_forward
Diffstat (limited to 'pkg/atomicbitops/atomicbitops_arm64.s')
-rw-r--r--  pkg/atomicbitops/atomicbitops_arm64.s  105
1 file changed, 105 insertions, 0 deletions
diff --git a/pkg/atomicbitops/atomicbitops_arm64.s b/pkg/atomicbitops/atomicbitops_arm64.s
new file mode 100644
index 000000000..5c780851b
--- /dev/null
+++ b/pkg/atomicbitops/atomicbitops_arm64.s
@@ -0,0 +1,105 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build arm64
+
+#include "textflag.h"
+
+TEXT ·AndUint32(SB),$0-12
+ MOVD ptr+0(FP), R0
+ MOVW val+8(FP), R1
+again:
+ LDAXRW (R0), R2
+ ANDW R1, R2
+ STLXRW R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·OrUint32(SB),$0-12
+ MOVD ptr+0(FP), R0
+ MOVW val+8(FP), R1
+again:
+ LDAXRW (R0), R2
+ ORRW R1, R2
+ STLXRW R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·XorUint32(SB),$0-12
+ MOVD ptr+0(FP), R0
+ MOVW val+8(FP), R1
+again:
+ LDAXRW (R0), R2
+ EORW R1, R2
+ STLXRW R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·CompareAndSwapUint32(SB),$0-20
+ MOVD addr+0(FP), R0
+ MOVW old+8(FP), R1
+ MOVW new+12(FP), R2
+again:
+ LDAXRW (R0), R3
+ CMPW R1, R3
+ BNE done
+ STLXRW R2, (R0), R4
+ CBNZ R4, again
+done:
+ MOVW R3, prev+16(FP)
+ RET
+
+TEXT ·AndUint64(SB),$0-16
+ MOVD ptr+0(FP), R0
+ MOVD val+8(FP), R1
+again:
+ LDAXR (R0), R2
+ AND R1, R2
+ STLXR R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·OrUint64(SB),$0-16
+ MOVD ptr+0(FP), R0
+ MOVD val+8(FP), R1
+again:
+ LDAXR (R0), R2
+ ORR R1, R2
+ STLXR R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·XorUint64(SB),$0-16
+ MOVD ptr+0(FP), R0
+ MOVD val+8(FP), R1
+again:
+ LDAXR (R0), R2
+ EOR R1, R2
+ STLXR R2, (R0), R3
+ CBNZ R3, again
+ RET
+
+TEXT ·CompareAndSwapUint64(SB),$0-32
+ MOVD addr+0(FP), R0
+ MOVD old+8(FP), R1
+ MOVD new+16(FP), R2
+again:
+ LDAXR (R0), R3
+ CMP R1, R3
+ BNE done
+ STLXR R2, (R0), R4
+ CBNZ R4, again
+done:
+ MOVD R3, prev+24(FP)
+ RET
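
Each read-modify-write routine above is a load-linked/store-conditional retry loop: LDAXRW (LDAXR in the 64-bit variants) performs a load-acquire exclusive, ANDW/ORRW/EORW applies the operation, and STLXRW attempts a store-release exclusive, writing a non-zero status to R3 if another core broke the exclusive reservation; CBNZ then retries from the load. As a semantics-only sketch (not the package's actual Go code; the Go-side declarations backing these TEXT symbols are not part of this diff), the 32-bit ops behave like the sync/atomic loops below, with the caveat that sync/atomic is sequentially consistent, which is at least as strong as the acquire/release ordering here. The 64-bit variants are identical with the Uint64 equivalents.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // andUint32 models ·AndUint32: retry a compare-and-swap until the
    // bitwise-AND result lands, mirroring the LDAXRW/STLXRW loop.
    func andUint32(addr *uint32, val uint32) {
        for {
            old := atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old&val) {
                return
            }
        }
    }

    // orUint32 differs only in the operator applied, just as ·OrUint32 and
    // ·XorUint32 differ from ·AndUint32 only in ANDW/ORRW/EORW.
    func orUint32(addr *uint32, val uint32) {
        for {
            old := atomic.LoadUint32(addr)
            if atomic.CompareAndSwapUint32(addr, old, old|val) {
                return
            }
        }
    }

    func main() {
        var flags uint32 = 0b0101
        orUint32(&flags, 0b0010)      // set bit 1
        andUint32(&flags, ^uint32(1)) // clear bit 0
        fmt.Printf("%04b\n", flags)   // prints 0110
    }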
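·CompareAndSwapUint32 and ·CompareAndSwapUint64 return the previous value rather than a success flag: on a mismatch the code branches to done without storing (BNE done), and a failed exclusive store (CBNZ on R4) loops back to reload and re-compare. A hedged Go model of that contract, assuming nothing beyond what the assembly shows:

    // compareAndSwapUint32 models ·CompareAndSwapUint32: it returns the
    // value observed at addr, and callers detect success as prev == old.
    func compareAndSwapUint32(addr *uint32, old, new uint32) uint32 {
        for {
            prev := atomic.LoadUint32(addr)
            if prev != old {
                return prev // mismatch: no store, like the BNE-to-done path
            }
            if atomic.CompareAndSwapUint32(addr, old, new) {
                return old // stored; the value observed before the swap was old
            }
            // Lost the race between load and swap; retry, like CBNZ looping
            // back to LDAXRW after a failed STLXRW.
        }
    }

Returning the observed value instead of a boolean saves the caller a separate reload when it wants to retry with the freshly seen value.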