src/internal/runtime/atomic/atomic_arm64.s

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "textflag.h"

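// The functions below are thin wrappers: each tail-branches (B) to the
// sized implementation later in this file; e.g. int64, uint64, and uintptr
// all share the 64-bit routines on arm64.
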
TEXT ·Casint32(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	B	·Cas(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	B	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-16
	B	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	B	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	B	·Xadd64(SB)

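// Casp1 is the pointer compare-and-swap variant that does not invoke a
// write barrier (callers are expected to handle that); pointers are
// 64 bits on arm64, so it shares ·Cas64.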
TEXT ·Casp1(SB), NOSPLIT, $0-25
	B	·Cas64(SB)

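// The loads below use load-acquire instructions (LDARB/LDARW/LDAR): memory
// accesses after the load in program order cannot be reordered before it.
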
// uint32 ·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 ·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

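// StorepNoWB performs *ptr = val atomically (a 64-bit store-release)
// without invoking a write barrier.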
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

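// The stores below use store-release instructions (STLRB/STLRW/STLR): memory
// accesses before the store in program order cannot be reordered after it.
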
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

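// The read-modify-write operations below come in two flavors. When the
// target is known to have LSE atomics (GOARM64_LSE), only the
// one-instruction LSE form (SWPALW, CASALW, LDADDALW, ...) is assembled.
// Otherwise the LSE form is chosen at run time when internal/cpu reports
// ARM64.HasATOMICS, with a fallback loop built from exclusive
// load-acquire/store-release pairs (LDAXR/STLXR); STLXR sets its status
// register to non-zero when the exclusive monitor is lost, and the loop
// retries.
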
// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVW	old+8(FP), R1
	MOVW	new+12(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALW	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R3
	CMPW	R1, R3
	BNE	ok
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+16(FP)
	RET
#endif

// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MOVD	R1, R3
	CASALD	R3, (R0), R2
	CMP	R1, R3
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
#endif

// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDADDALW	R1, (R0), R2
	ADD	R1, R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDADDALD	R1, (R0), R2
	ADD	R1, R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	B	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	B	·Xchg64(SB)

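// func And8(ptr *uint8, val uint8)
// Atomically:
//	*ptr &= val
// (LSE has no atomic AND, so the LSE path clears the complemented bits
// with MVN + LDCLRALB.)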
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALB	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

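// func Or8(ptr *uint8, val uint8)
// Atomically:
//	*ptr |= val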
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALB	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func And(ptr *uint32, val uint32)
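// Atomically:
//	*ptr &= val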
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or(ptr *uint32, val uint32)
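// Atomically:
//	*ptr |= val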
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, load_store_loop
	RET
#endif

// func Or32(ptr *uint32, val uint32) (old uint32)
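// Atomically:
//	old = *ptr
//	*ptr |= val
//	return old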
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	ORR	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// func And32(ptr *uint32, val uint32) (old uint32)
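// Atomically:
//	old = *ptr
//	*ptr &= val
//	return old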
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALW	R2, (R0), R3
	MOVW	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	AND	R1, R2, R3
	STLXRW	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// func Or64(ptr *uint64, val uint64) (old uint64)
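// Atomically:
//	old = *ptr
//	*ptr |= val
//	return old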
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	LDORALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	ORR	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func And64(ptr *uint64, val uint64) (old uint64)
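// Atomically:
//	old = *ptr
//	*ptr &= val
//	return old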
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	MVN	R1, R2
	LDCLRALD	R2, (R0), R3
	MOVD	R3, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	AND	R1, R2, R3
	STLXR	R3, (R0), R4
	CBNZ	R4, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// func Anduintptr(addr *uintptr, v uintptr) (old uintptr)
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	B	·And64(SB)

// func Oruintptr(addr *uintptr, v uintptr) (old uintptr)
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	B	·Or64(SB)
