path: root/pkg/atomicbitops/atomic_bitops_arm64.s
blob: 97f8808c1616fb84bfb706ec72a4b76af573786f
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build arm64

#include "textflag.h"
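
// The routines below implement atomic bitwise operations and
// conditional increment/decrement for ARM64 using load-acquire /
// store-release exclusive pairs (LDAXR(W)/STLXR(W)): each one loads
// the current value exclusively, computes the update, and retries the
// store-exclusive (CBNZ on its status register) until it succeeds.
//
// The Go prototypes implied by the argument frame sizes are sketched
// below; the authoritative declarations live in the package's Go
// source, so treat this as an inference, not a definition:
//
//   func AndUint32(ptr *uint32, val uint32)                                 // $0-12
//   func OrUint32(ptr *uint32, val uint32)                                  // $0-12
//   func XorUint32(ptr *uint32, val uint32)                                 // $0-12
//   func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32)  // $0-20
//   func AndUint64(ptr *uint64, val uint64)                                 // $0-16
//   func OrUint64(ptr *uint64, val uint64)                                  // $0-16
//   func XorUint64(ptr *uint64, val uint64)                                 // $0-16
//   func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64)  // $0-32
//   func IncUnlessZeroInt32(addr *int32) bool                               // $0-9
//   func DecUnlessOneInt32(addr *int32) bool                                // $0-9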

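// AndUint32 atomically performs *ptr &= val.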
TEXT ·AndUint32(SB),$0-12
  MOVD    ptr+0(FP), R0
  MOVW    val+8(FP), R1
again:
  LDAXRW  (R0), R2
  ANDW    R1, R2
  STLXRW  R2, (R0), R3
  CBNZ    R3, again
  RET

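// OrUint32 atomically performs *ptr |= val.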
TEXT ·OrUint32(SB),$0-12
  MOVD    ptr+0(FP), R0
  MOVW    val+8(FP), R1
again:
  LDAXRW  (R0), R2
  ORRW    R1, R2
  STLXRW  R2, (R0), R3
  CBNZ    R3, again
  RET

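// XorUint32 atomically performs *ptr ^= val.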
TEXT ·XorUint32(SB),$0-12
  MOVD    ptr+0(FP), R0
  MOVW    val+8(FP), R1
again:
  LDAXRW  (R0), R2
  EORW    R1, R2
  STLXRW  R2, (R0), R3
  CBNZ    R3, again
  RET

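// CompareAndSwapUint32 atomically stores new to *addr if *addr equals
// old, and returns the value previously held at *addr in prev.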
TEXT ·CompareAndSwapUint32(SB),$0-20
  MOVD    addr+0(FP), R0
  MOVW    old+8(FP), R1
  MOVW    new+12(FP), R2
again:
  LDAXRW  (R0), R3
  CMPW    R1, R3
  BNE     done
  STLXRW  R2, (R0), R4
  CBNZ    R4, again
done:
  MOVW    R3, prev+16(FP)
  RET

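// AndUint64 atomically performs *ptr &= val.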
TEXT ·AndUint64(SB),$0-16
  MOVD    ptr+0(FP), R0
  MOVD    val+8(FP), R1
again:
  LDAXR   (R0), R2
  AND     R1, R2
  STLXR   R2, (R0), R3
  CBNZ    R3, again
  RET

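// OrUint64 atomically performs *ptr |= val.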
TEXT ·OrUint64(SB),$0-16
  MOVD    ptr+0(FP), R0
  MOVD    val+8(FP), R1
again:
  LDAXR   (R0), R2
  ORR     R1, R2
  STLXR   R2, (R0), R3
  CBNZ    R3, again
  RET

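// XorUint64 atomically performs *ptr ^= val.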
TEXT ·XorUint64(SB),$0-16
  MOVD    ptr+0(FP), R0
  MOVD    val+8(FP), R1
again:
  LDAXR   (R0), R2
  EOR     R1, R2
  STLXR   R2, (R0), R3
  CBNZ    R3, again
  RET

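// CompareAndSwapUint64 is the 64-bit variant of CompareAndSwapUint32:
// it atomically stores new to *addr if *addr equals old, and returns
// the previous value in prev.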
TEXT ·CompareAndSwapUint64(SB),$0-32
  MOVD    addr+0(FP), R0
  MOVD    old+8(FP), R1
  MOVD    new+16(FP), R2
again:
  LDAXR   (R0), R3
  CMP     R1, R3
  BNE     done
  STLXR   R2, (R0), R4
  CBNZ    R4, again
done:
  MOVD    R3, prev+24(FP)
  RET

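// IncUnlessZeroInt32 increments *addr and returns true (1), unless
// *addr holds zero, in which case it is left unchanged and false (0)
// is returned.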
TEXT ·IncUnlessZeroInt32(SB),NOSPLIT,$0-9
  MOVD    addr+0(FP), R0
again:
  LDAXRW  (R0), R1
  CBZ     R1, fail
  ADDW    $1, R1
  STLXRW  R1, (R0), R2
  CBNZ    R2, again
  MOVW    $1, R2
  MOVB    R2, ret+8(FP)
  RET
fail:
  MOVB    ZR, ret+8(FP)
  RET

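// DecUnlessOneInt32 decrements *addr and returns true (1), unless
// *addr holds one, in which case it is left unchanged and false (0)
// is returned.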
TEXT ·DecUnlessOneInt32(SB),NOSPLIT,$0-9
  MOVD    addr+0(FP), R0
again:
  LDAXRW  (R0), R1
  SUBSW   $1, R1, R1
  BEQ     fail
  STLXRW  R1, (R0), R2
  CBNZ    R2, again
  MOVW    $1, R2
  MOVB    R2, ret+8(FP)
  RET
fail:
  MOVB    ZR, ret+8(FP)
  RET