.globl add64
.globl mul

.text

# > When primitive arguments twice the size of a pointer-word are passed on the
# > stack, they are naturally aligned. When they are passed in the integer
# > registers, they reside in an aligned even-odd register pair, with the even
# > register holding the least-significant bits.
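#
# A worked illustration of the rule above (example values are mine, not from
# the quoted text): a 64-bit argument x = 0x00000001_FFFFFFFF arrives with its
# low word in the even register (a0 = 0xFFFFFFFF) and its high word in the odd
# register (a1 = 0x00000001).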

# 64-bit integer addition
# arguments:
# a0: x lower 32 bits
# a1: x upper 32 bits
# a2: y lower 32 bits
# a3: y upper 32 bits
# return:
# a0: x+y lower 32 bits
# a1: x+y upper 32 bits
#
add64:
    add a0, a0, a2      # add lower 32 bits
    add t0, a1, a3      # add upper 32 bits
    sltu t1, a0, a2     # carry out of low word: unsigned sum < addend iff the add wrapped
    add a1, t0, t1      # upper 32 bits of answer (upper sum + carry bit)
    ret
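
# A minimal usage sketch (hypothetical caller, not part of the original file):
# exercises the carry path by computing 0x00000001_FFFFFFFF + 0x00000000_00000001
# = 0x00000002_00000000. The label add64_example and its stack handling are
# assumptions for illustration; it follows the standard RISC-V calling convention.
add64_example:
    addi sp, sp, -16    # reserve a stack frame (non-leaf: must preserve ra)
    sw ra, 12(sp)       # save return address
    li a0, 0xFFFFFFFF   # x lower 32 bits (all ones, forces a carry)
    li a1, 1            # x upper 32 bits
    li a2, 1            # y lower 32 bits
    li a3, 0            # y upper 32 bits
    call add64          # returns a0 = 0x00000000, a1 = 0x00000002
    lw ra, 12(sp)       # restore return address
    addi sp, sp, 16     # release the stack frame
    ret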

# 32-bit shift-add multiplication
# arguments:
# a0: multiplicand
# a1: multiplier
# return:
# a0 = a0 × a1 (low 32 bits of the product)
#
mul:
    mv t0, a1           # Save multiplier in t0
    li a1, 0            # Initialize product in a1

multiply_loop:
    beqz t0, done       # If multiplier is 0, we're done
    andi t1, t0, 1      # Check least significant bit
    beqz t1, shift      # If LSB is 0, skip addition
    add a1, a1, a0      # Add multiplicand to product

shift:
    slli a0, a0, 1      # Shift multiplicand left
    srli t0, t0, 1      # Shift multiplier right
    j multiply_loop     # Continue loop

done:
    mv a0, a1           # Move product to return register
    ret
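
# Worked trace of the loop above (illustrative values, not from the original
# file): mul(6, 5)
#   start:  a0 = 6,  t0 = 0b101, a1 = 0
#   pass 1: LSB = 1 -> a1 = 0 + 6  = 6;   a0 = 12, t0 = 0b10
#   pass 2: LSB = 0 -> skip the add;      a0 = 24, t0 = 0b1
#   pass 3: LSB = 1 -> a1 = 6 + 24 = 30;  a0 = 48, t0 = 0
#   t0 == 0, so the loop exits and mul returns a0 = 30 = 6 × 5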