path: root/pkg/safecopy/memclr_arm64.s
author    Kevin Krakauer <krakauer@google.com>  2020-01-29 13:21:12 -0800
committer Kevin Krakauer <krakauer@google.com>  2020-01-29 13:21:12 -0800
commit    b615f94aeacb2c21bb59c8b44f303e7b7ca05ad6 (patch)
tree      79907e842eeba2a1f01acb5b5661800dc6ef2174 /pkg/safecopy/memclr_arm64.s
parent    d6a2e01d3e57e0837c7e5cfda3b56c4dcfbb4627 (diff)
parent    148fda60e8dee29f2df85e3104e3d5de1a225bcf (diff)
Merge branch 'master' into ipt-udp-matchers
Diffstat (limited to 'pkg/safecopy/memclr_arm64.s')
-rw-r--r--  pkg/safecopy/memclr_arm64.s  74
1 file changed, 74 insertions(+), 0 deletions(-)
diff --git a/pkg/safecopy/memclr_arm64.s b/pkg/safecopy/memclr_arm64.s
new file mode 100644
index 000000000..7361b9067
--- /dev/null
+++ b/pkg/safecopy/memclr_arm64.s
@@ -0,0 +1,74 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// handleMemclrFault returns (the value stored in R0, the value stored in R1).
+// Control is transferred to it when memclr below receives SIGSEGV or SIGBUS,
+// with the faulting address stored in R0 and the signal number stored in R1.
+//
+// It must have the same frame configuration as memclr so that it can undo any
+// potential call frame set up by the assembler.
+TEXT handleMemclrFault(SB), NOSPLIT, $0-28
+ MOVD R0, addr+16(FP)
+ MOVW R1, sig+24(FP)
+ RET
+
+// See the corresponding doc in safecopy_unsafe.go
+//
+// The code is derived from runtime.memclrNoHeapPointers.
+//
+// func memclr(ptr unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32)
+TEXT ·memclr(SB), NOSPLIT, $0-28
+ // Store 0 as the returned signal number. If we run to completion,
+ // this is the value the caller will see; if a signal is received,
+ // handleMemclrFault will store a different value in this address.
+ MOVW $0, sig+24(FP)
+ MOVD ptr+0(FP), R0
+ MOVD n+8(FP), R1
+
+ // If size is less than 16 bytes, use tail_zero to zero what remains
+ CMP $16, R1
+ BLT tail_zero
+	// Test whether the pointer is 16-byte aligned; aligned stores perform better
+ ANDS $15, R0, ZR
+ BNE unaligned_to_16
+aligned_to_16:
+ LSR $4, R1, R2
+zero_by_16:
+ STP.P (ZR, ZR), 16(R0) // Store pair with post index.
+ SUBS $1, R2, R2
+ BNE zero_by_16
+ ANDS $15, R1, R1
+ BEQ end
+
+	// Zero the remaining R1 < 16 bytes: each TBZ tests one bit of R1 and stores 8, 4, 2, or 1 bytes
+tail_zero:
+ TBZ $3, R1, tail_zero_4
+ MOVD.P ZR, 8(R0)
+tail_zero_4:
+ TBZ $2, R1, tail_zero_2
+ MOVW.P ZR, 4(R0)
+tail_zero_2:
+ TBZ $1, R1, tail_zero_1
+ MOVH.P ZR, 2(R0)
+tail_zero_1:
+ TBZ $0, R1, end
+ MOVB ZR, (R0)
+end:
+ RET
+
+unaligned_to_16:
+ MOVD R0, R2
+head_loop:
+ MOVBU.P ZR, 1(R0)
+ ANDS $15, R0, ZR
+ BNE head_loop
+	// Subtract the bytes zeroed during alignment from the remaining length
+ SUB R2, R0, R3
+ SUB R3, R1
+ // If size is less than 16 bytes, use tail_zero to zero what remains
+ CMP $16, R1
+ BLT tail_zero
+ B aligned_to_16
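
For context, the assembly above pairs with a Go-side declaration and wrapper in safecopy_unsafe.go, as referenced in the comment before ·memclr. The sketch below illustrates that pairing under stated assumptions: the memclr signature matches the one documented in this file, but the wrapper name zeroOutSketch and its error formatting are illustrative stand-ins, not code from this change.

package safecopy

import (
	"fmt"
	"unsafe"
)

// memclr is implemented in memclr_arm64.s. It zeroes n bytes starting at ptr.
// If a SIGSEGV or SIGBUS is received during the write, it returns the faulting
// address and the signal number; otherwise sig is 0.
//
//go:noescape
func memclr(ptr unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32)

// zeroOutSketch (hypothetical name) zeroes toZero bytes at dst, converting a
// fault reported by memclr into an error instead of a signal.
func zeroOutSketch(dst unsafe.Pointer, toZero uintptr) error {
	fault, sig := memclr(dst, toZero)
	if sig == 0 {
		return nil
	}
	return fmt.Errorf("signal %d at address %#x while zeroing memory", sig, uintptr(fault))
}

The design point the assembly makes possible is visible here: because handleMemclrFault stores the faulting address and signal number into memclr's return slots, the Go caller sees a fault as an ordinary return value rather than a crashing signal, and can turn it into an error.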