py/emitnative: Improve Viper register-indexed code for Arm.
This commit lets the Viper code generator use optimised code sequences for register-indexed load and store operations when generating Arm code. The existing code defaulted to generic multi-operation code sequences for Arm code in most cases. Now optimised implementations are provided for register-indexed loads and stores of all data sizes, taking at most two machine opcodes for each operation. Signed-off-by: Alessandro Gatti <a.gatti@frob.it>
This commit is contained in:
16
py/asmarm.c
16
py/asmarm.c
@@ -343,6 +343,12 @@ void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
|
||||
emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
|
||||
}
|
||||
|
||||
void asm_arm_ldrh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // ldrh has no scaled-register addressing mode, so the halfword index in
    // rn is first doubled into the scratch register r8, then used as a plain
    // register offset.
    uint op_mov = 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn; // mov r8, rn, lsl #1
    uint op_ldrh = 0x19000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8; // ldrh rd, [rm, r8]
    emit_al(as, op_mov);
    emit_al(as, op_ldrh);
}
|
||||
|
||||
void asm_arm_ldrh_reg_reg_offset(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
|
||||
if (byte_offset < 0x100) {
|
||||
// ldrh rd, [rn, #off]
|
||||
@@ -360,6 +366,16 @@ void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
|
||||
emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
|
||||
}
|
||||
|
||||
void asm_arm_ldrb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // Byte loads need no index scaling, so a single register-offset
    // instruction suffices: ldrb rd, [rm, rn]
    uint opcode = 0x7d00000 | (rm << 16) | (rd << 12) | rn;
    emit_al(as, opcode);
}
|
||||
|
||||
void asm_arm_ldr_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // Word load with the index register scaled in the addressing mode
    // itself: ldr rd, [rm, rn, lsl #2]
    uint opcode = 0x7900100 | (rm << 16) | (rd << 12) | rn;
    emit_al(as, opcode);
}
|
||||
|
||||
void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
|
||||
// str rd, [rm, #off]
|
||||
emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
|
||||
|
||||
Reference in New Issue
Block a user