From d2c9b859c407d94cc1b565d3169e2539fec054e6 Mon Sep 17 00:00:00 2001
From: Mohanson
Date: Fri, 23 Feb 2024 13:36:49 +0800
Subject: [PATCH] Make sure x0 is not modified in mops (#403)

* Make sure x0 is not modified in mops

* Ensure aarch64 behavior is consistent with x86

* Refactor

* Quick fix previous commit
---
 fuzz/fuzz_targets/asm.rs          | 32 ++++++++++++++++++++++-----
 src/machine/asm/execute_aarch64.S | 36 +++++++++++++++++--------------
 src/machine/asm/execute_x64.S     | 27 ++++++++++++++---------
 3 files changed, 64 insertions(+), 31 deletions(-)

diff --git a/fuzz/fuzz_targets/asm.rs b/fuzz/fuzz_targets/asm.rs
index 7f528bf4..40b204a7 100644
--- a/fuzz/fuzz_targets/asm.rs
+++ b/fuzz/fuzz_targets/asm.rs
@@ -1,11 +1,13 @@
 #![no_main]
 use ckb_vm::cost_model::constant_cycles;
 use ckb_vm::machine::asm::{AsmCoreMachine, AsmMachine};
-use ckb_vm::machine::{DefaultMachineBuilder, VERSION2};
+use ckb_vm::machine::{DefaultCoreMachine, DefaultMachineBuilder, VERSION2};
+use ckb_vm::memory::sparse::SparseMemory;
+use ckb_vm::memory::wxorx::WXorXMemory;
 use ckb_vm::{Bytes, Error, ISA_A, ISA_B, ISA_IMC, ISA_MOP};
 use libfuzzer_sys::fuzz_target;
 
-fn run(data: &[u8]) -> Result<i8, Error> {
+fn run_asm(data: &[u8]) -> Result<i8, Error> {
     let asm_core = AsmCoreMachine::new(ISA_IMC | ISA_A | ISA_B | ISA_MOP, VERSION2, 200_000);
     let core = DefaultMachineBuilder::<Box<AsmCoreMachine>>::new(asm_core)
         .instruction_cycle_func(Box::new(constant_cycles))
@@ -16,10 +18,30 @@ fn run(data: &[u8]) -> Result<i8, Error> {
     machine.run()
 }
 
+fn run_int(data: &[u8]) -> Result<i8, Error> {
+    let machine_core = DefaultCoreMachine::<u64, WXorXMemory<SparseMemory<u64>>>::new(
+        ISA_IMC | ISA_A | ISA_B | ISA_MOP,
+        VERSION2,
+        200_000,
+    );
+    let mut machine = DefaultMachineBuilder::new(machine_core)
+        .instruction_cycle_func(Box::new(constant_cycles))
+        .build();
+    let program = Bytes::copy_from_slice(data);
+    machine.load_program(&program, &[])?;
+    machine.run()
+}
+
 fuzz_target!(|data: &[u8]| {
-    let r0 = run(data);
-    let r1 = run(data);
-    let r2 = run(data);
+    let r0 = run_asm(data);
+    let r1 = run_asm(data);
+    let r2 = run_asm(data);
+    let r3 = run_int(data);
     assert_eq!(r0, r1);
     assert_eq!(r1, r2);
+    if r2.is_ok() {
+        assert_eq!(r2.unwrap(), r3.unwrap());
+    } else {
+        assert!(r3.is_err())
+    }
 });
diff --git a/src/machine/asm/execute_aarch64.S b/src/machine/asm/execute_aarch64.S
index bd7068b8..1d2b36c8 100644
--- a/src/machine/asm/execute_aarch64.S
+++ b/src/machine/asm/execute_aarch64.S
@@ -75,17 +75,17 @@
   str v, REGISTER_ADDRESS(RD) SEP \
   str ZERO_VALUE, ZERO_ADDRESS
 
+#define WRITE_RD_V2(v) \
+  str v, REGISTER_ADDRESS(RD)
+
 #define WRITE_RS1(v) \
-  str v, REGISTER_ADDRESS(RS1) SEP \
-  str ZERO_VALUE, ZERO_ADDRESS
+  str v, REGISTER_ADDRESS(RS1)
 
 #define WRITE_RS2(v) \
-  str v, REGISTER_ADDRESS(RS2) SEP \
-  str ZERO_VALUE, ZERO_ADDRESS
+  str v, REGISTER_ADDRESS(RS2)
 
 #define WRITE_RS3(v) \
-  str v, REGISTER_ADDRESS(RS3) SEP \
-  str ZERO_VALUE, ZERO_ADDRESS
+  str v, REGISTER_ADDRESS(RS3)
 
 #define NEXT_INST \
   ldr TEMP1, [INST_ARGS] SEP \
@@ -96,6 +96,10 @@
   add INST_PC, INST_PC, 8 SEP \
   br TEMP2
 
+#define NEXT_INST_V2 \
+  str ZERO_VALUE, ZERO_ADDRESS SEP \
+  NEXT_INST
+
 #define DECODE_R \
   ubfx RS1, TEMP1, 0, 8 SEP \
   ubfx RS2, TEMP1, 8, 8
@@ -1892,9 +1896,9 @@ ckb_vm_x64_execute:
   ldr TEMP4, REGISTER_ADDRESS(RS2)
   adds TEMP3, TEMP3, TEMP4
   adc TEMP1, TEMP1, TEMP1
-  WRITE_RD(TEMP3)
+  WRITE_RD_V2(TEMP3)
   WRITE_RS3(TEMP1)
-  NEXT_INST
+  NEXT_INST_V2
 .CKB_VM_ASM_LABEL_OP_SBBS:
   DECODE_R4
   mov TEMP1, 0
@@ -1903,9 +1907,9 @@ ckb_vm_x64_execute:
   subs TEMP3, TEMP3, TEMP4
   adc TEMP1, TEMP1, TEMP1
   eor TEMP1, TEMP1, 1
-  WRITE_RD(TEMP3)
+  WRITE_RD_V2(TEMP3)
   WRITE_RS3(TEMP1)
-  NEXT_INST
+  NEXT_INST_V2
 .CKB_VM_ASM_LABEL_OP_ADD3A:
   DECODE_R5
   mov TEMP2, 0
@@ -1916,10 +1920,10 @@ ckb_vm_x64_execute:
   mov TEMP3, TEMP2
   ldr TEMP1, REGISTER_ADDRESS(RS4_TEMP5)
   add TEMP3, TEMP3, TEMP1
-  WRITE_RD(TEMP4)
+  WRITE_RD_V2(TEMP4)
   WRITE_RS2(TEMP2)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
 .CKB_VM_ASM_LABEL_OP_ADD3B:
   DECODE_R5
   mov TEMP2, 0
@@ -1930,10 +1934,10 @@ ckb_vm_x64_execute:
   mov TEMP3, TEMP2
   ldr TEMP1, REGISTER_ADDRESS(RS4_TEMP5)
   add TEMP3, TEMP3, TEMP1
-  WRITE_RD(TEMP4)
+  WRITE_RD_V2(TEMP4)
   WRITE_RS1(TEMP2)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
 .CKB_VM_ASM_LABEL_OP_ADD3C:
   DECODE_R5
   mov TEMP3, 0
@@ -1942,9 +1946,9 @@ ckb_vm_x64_execute:
   ldr TEMP4, REGISTER_ADDRESS(RS4_TEMP5)
   adds TEMP1, TEMP1, TEMP2
   adc TEMP3, TEMP3, TEMP4
-  WRITE_RD(TEMP1)
+  WRITE_RD_V2(TEMP1)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
 .exit_max_cycles_exceeded:
   mov x0, CKB_VM_ASM_RET_MAX_CYCLES_EXCEEDED
   b .exit
diff --git a/src/machine/asm/execute_x64.S b/src/machine/asm/execute_x64.S
index dac27519..19cc35da 100644
--- a/src/machine/asm/execute_x64.S
+++ b/src/machine/asm/execute_x64.S
@@ -337,6 +337,9 @@
   movq v, REGISTER_ADDRESS(RD); \
   movq $0, ZERO_ADDRESS
 
+#define WRITE_RD_V2(v) \
+  movq v, REGISTER_ADDRESS(RD); \
+
 #define WRITE_RS1(v) \
   movq v, REGISTER_ADDRESS(RS1); \
 
@@ -364,6 +367,10 @@
   addq $8, INST_PC; \
   jmp *TEMP1
 
+#define NEXT_INST_V2 \
+  movq $0, ZERO_ADDRESS; \
+  NEXT_INST
+
 #define DECODE_R \
   movzbl %cl, RS1d; \
   movzbl %ch, RS2rd
@@ -2281,9 +2288,9 @@ ckb_vm_x64_execute:
   movq REGISTER_ADDRESS(RS1), %rcx
   addq REGISTER_ADDRESS(RS2r), %rcx
   adc $0, TEMP1
-  WRITE_RD(%rcx)
+  WRITE_RD_V2(%rcx)
   WRITE_RS3(TEMP1)
-  NEXT_INST
+  NEXT_INST_V2
   .p2align 3
 .CKB_VM_ASM_LABEL_OP_SBBS:
   DECODE_R4
@@ -2291,9 +2298,9 @@ ckb_vm_x64_execute:
   movq REGISTER_ADDRESS(RS1), %rcx
   subq REGISTER_ADDRESS(RS2r), %rcx
   adc $0, TEMP1
-  WRITE_RD(%rcx)
+  WRITE_RD_V2(%rcx)
   WRITE_RS3(TEMP1)
-  NEXT_INST
+  NEXT_INST_V2
   .p2align 3
 .CKB_VM_ASM_LABEL_OP_ADD3A:
   DECODE_R5
@@ -2303,10 +2310,10 @@ ckb_vm_x64_execute:
   adc $0, TEMP2
   movq TEMP2, TEMP3
   addq REGISTER_ADDRESS(RS4_TEMP1), TEMP3
-  WRITE_RD(%rcx)
+  WRITE_RD_V2(%rcx)
   WRITE_RS2r(TEMP2)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
   .p2align 3
 .CKB_VM_ASM_LABEL_OP_ADD3B:
   DECODE_R5
@@ -2316,10 +2323,10 @@ ckb_vm_x64_execute:
   adc $0, TEMP2
   movq TEMP2, TEMP3
   addq REGISTER_ADDRESS(RS4_TEMP1), TEMP3
-  WRITE_RD(%rcx)
+  WRITE_RD_V2(%rcx)
   WRITE_RS1(TEMP2)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
   .p2align 3
 .CKB_VM_ASM_LABEL_OP_ADD3C:
   DECODE_R5
@@ -2327,9 +2334,9 @@ ckb_vm_x64_execute:
   movq REGISTER_ADDRESS(RS1), %rcx
   addq REGISTER_ADDRESS(RS2r), %rcx
   adcq REGISTER_ADDRESS(RS4_TEMP1), TEMP3
-  WRITE_RD(%rcx)
+  WRITE_RD_V2(%rcx)
   WRITE_RS3(TEMP3)
-  NEXT_INST
+  NEXT_INST_V2
   .p2align 3
 .exit_out_of_bound:
   mov $CKB_VM_ASM_RET_OUT_OF_BOUND, ARG_RETd