diff --git a/python/port/genhdr/mpversion.h b/python/port/genhdr/mpversion.h index d3f493cf8..41fb02eec 100644 --- a/python/port/genhdr/mpversion.h +++ b/python/port/genhdr/mpversion.h @@ -1,4 +1,4 @@ -#define MICROPY_VERSION_MAJOR (1) -#define MICROPY_VERSION_MINOR (9) -#define MICROPY_VERSION_MICRO (4) -#define MICROPY_VERSION_STRING "1.9.4" +// This file was generated by py/makeversionhdr.py +#define MICROPY_GIT_TAG "v1.11-dirty" +#define MICROPY_GIT_HASH "6f75c4f3c-dirty" +#define MICROPY_BUILD_DATE "2019-08-21" diff --git a/python/src/extmod/modurandom.c b/python/src/extmod/modurandom.c index a8d087c49..2e667570d 100644 --- a/python/src/extmod/modurandom.c +++ b/python/src/extmod/modurandom.c @@ -200,7 +200,6 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_2(mod_urandom_uniform_obj, mod_urandom_uniform); #endif // MICROPY_PY_URANDOM_EXTRA_FUNCS - #ifdef MICROPY_PY_URANDOM_SEED_INIT_FUNC STATIC mp_obj_t mod_urandom___init__() { mod_urandom_seed(MP_OBJ_NEW_SMALL_INT(MICROPY_PY_URANDOM_SEED_INIT_FUNC)); diff --git a/python/src/py/argcheck.c b/python/src/py/argcheck.c index d53bca73a..c2b1b6c07 100644 --- a/python/src/py/argcheck.c +++ b/python/src/py/argcheck.c @@ -29,14 +29,19 @@ #include "py/runtime.h" -void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) { +void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig) { // TODO maybe take the function name as an argument so we can print nicer error messages + // The reverse of MP_OBJ_FUN_MAKE_SIG + bool takes_kw = sig & 1; + size_t n_args_min = sig >> 17; + size_t n_args_max = (sig >> 1) & 0xffff; + if (n_kw && !takes_kw) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { mp_arg_error_terse_mismatch(); } else { - mp_raise_TypeError("function does not take keyword arguments"); + mp_raise_TypeError("function doesn't take keyword arguments"); } } diff --git a/python/src/py/asmarm.c b/python/src/py/asmarm.c index 1a8923bc2..f2221f8a9 100644 --- a/python/src/py/asmarm.c +++ b/python/src/py/asmarm.c @@ -197,7 +197,16 @@ void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) { emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src)); } -void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) { +size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) { + // Insert immediate into code and jump over it + emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc] + emit_al(as, 0xa000000); // b pc + size_t loc = mp_asm_base_get_code_pos(&as->base); + emit(as, imm); + return loc; +} + +void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm) { // TODO: There are more variants of immediate values if ((imm & 0xFF) == imm) { emit_al(as, asm_arm_op_mov_imm(rd, imm)); @@ -205,10 +214,7 @@ void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) { // mvn is "move not", not "move negative" emit_al(as, asm_arm_op_mvn_imm(rd, ~imm)); } else { - //Insert immediate into code and jump over it - emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc] - emit_al(as, 0xa000000); // b pc - emit(as, imm); + asm_arm_mov_reg_i32(as, rd, imm); } } @@ -273,6 +279,21 @@ void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) { emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2)); } +void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label) { + assert(label < as->base.max_num_labels); + mp_uint_t dest = as->base.label_offsets[label]; + mp_int_t rel = dest - as->base.code_offset; + rel -= 12 + 8; // adjust for load of rel, and then PC+8 prefetch of 
add_reg_reg_reg + + // To load rel int reg_dest, insert immediate into code and jump over it + emit_al(as, 0x59f0000 | (reg_dest << 12)); // ldr rd, [pc] + emit_al(as, 0xa000000); // b pc + emit(as, rel); + + // Do reg_dest += PC + asm_arm_add_reg_reg_reg(as, reg_dest, reg_dest, ASM_ARM_REG_PC); +} + void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) { // mov rd, rd, lsl rs emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd); @@ -347,19 +368,15 @@ void asm_arm_b_label(asm_arm_t *as, uint label) { asm_arm_bcc_label(as, ASM_ARM_CC_AL, label); } -void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp) { - // If the table offset fits into the ldr instruction - if (fun_id < (0x1000 / 4)) { - emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc - emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4] - return; - } +void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp) { + // The table offset should fit into the ldr instruction + assert(fun_id < (0x1000 / 4)); + emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc + emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4] +} - emit_al(as, 0x59f0004 | (reg_temp << 12)); // ldr rd, [pc, #4] - // Set lr after fun_ptr - emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_LR, ASM_ARM_REG_PC, 4)); // add lr, pc, #4 - emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_PC, reg_temp)); // mov pc, reg_temp - emit(as, (uint) fun_ptr); +void asm_arm_bx_reg(asm_arm_t *as, uint reg_src) { + emit_al(as, 0x012fff10 | reg_src); } #endif // MICROPY_EMIT_ARM diff --git a/python/src/py/asmarm.h b/python/src/py/asmarm.h index 871e35820..825fd8840 100644 --- a/python/src/py/asmarm.h +++ b/python/src/py/asmarm.h @@ -81,7 +81,8 @@ void asm_arm_bkpt(asm_arm_t *as); // mov void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src); -void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm); +size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm); +void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm); void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd); void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num); void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond); @@ -98,6 +99,7 @@ void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm); void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num); +void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label); void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs); void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs); @@ -120,7 +122,11 @@ void asm_arm_pop(asm_arm_t *as, uint reglist); // control flow void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label); void asm_arm_b_label(asm_arm_t *as, uint label); -void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp); +void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp); +void asm_arm_bx_reg(asm_arm_t *as, uint reg_src); + +// Holds a pointer to mp_fun_table +#define ASM_ARM_REG_FUN_TABLE ASM_ARM_REG_R7 #if GENERIC_ASM_API @@ -144,18 +150,21 @@ void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp); #define REG_LOCAL_3 ASM_ARM_REG_R6 #define REG_LOCAL_NUM (3) +// Holds a pointer to mp_fun_table +#define REG_FUN_TABLE ASM_ARM_REG_FUN_TABLE + #define ASM_T asm_arm_t #define ASM_END_PASS 
asm_arm_end_pass #define ASM_ENTRY asm_arm_entry #define ASM_EXIT asm_arm_exit #define ASM_JUMP asm_arm_b_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \ do { \ asm_arm_cmp_reg_i8(as, reg, 0); \ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \ do { \ asm_arm_cmp_reg_i8(as, reg, 0); \ asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \ @@ -165,14 +174,17 @@ void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp); asm_arm_cmp_reg_reg(as, reg1, reg2); \ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \ } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, ASM_ARM_REG_R3) +#define ASM_JUMP_REG(as, reg) asm_arm_bx_reg((as), (reg)) +#define ASM_CALL_IND(as, idx) asm_arm_bl_ind(as, idx, ASM_ARM_REG_R3) #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_arm_mov_local_reg((as), (local_num), (reg_src)) -#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm)) -#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32_optimised((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm)) #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_arm_mov_reg_local((as), (reg_dest), (local_num)) #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src)) #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_arm_mov_reg_local_addr((as), (reg_dest), (local_num)) +#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_arm_mov_reg_pcrel((as), (reg_dest), (label)) #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift)) #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift)) diff --git a/python/src/py/asmthumb.c b/python/src/py/asmthumb.c index c5b45f2f5..68fb8f29e 100644 --- a/python/src/py/asmthumb.c +++ b/python/src/py/asmthumb.c @@ -33,9 +33,13 @@ // wrapper around everything in this file #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB +#include "py/mpstate.h" +#include "py/persistentcode.h" #include "py/mphal.h" #include "py/asmthumb.h" +#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32) +#define UNSIGNED_FIT7(x) ((uint32_t)(x) < 128) #define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0) #define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0) #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80) @@ -43,6 +47,15 @@ #define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800) #define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000) +// Note: these actually take an imm12 but the high-bit is not encoded here +#define OP_ADD_W_RRI_HI(reg_src) (0xf200 | (reg_src)) +#define OP_ADD_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff)) +#define OP_SUB_W_RRI_HI(reg_src) (0xf2a0 | (reg_src)) +#define OP_SUB_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff)) + +#define OP_LDR_W_HI(reg_base) (0xf8d0 | (reg_base)) +#define OP_LDR_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12)) + static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t 
*as, int n) { return mp_asm_base_get_cur_to_write_bytes(&as->base, n); } @@ -51,7 +64,7 @@ void asm_thumb_end_pass(asm_thumb_t *as) { (void)as; // could check labels are resolved... - #if defined(MCU_SERIES_F7) + #if __ICACHE_PRESENT == 1 if (as->base.pass == MP_ASM_PASS_EMIT) { // flush D-cache, so the code emitted is stored in memory MP_HAL_CLEAN_DCACHE(as->base.code_base, as->base.code_size); @@ -89,6 +102,7 @@ STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) { #define OP_POP_RLIST(rlolist) (0xbc00 | (rlolist)) #define OP_POP_RLIST_PC(rlolist) (0xbc00 | 0x0100 | (rlolist)) +// The number of words must fit in 7 unsigned bits #define OP_ADD_SP(num_words) (0xb000 | (num_words)) #define OP_SUB_SP(num_words) (0xb080 | (num_words)) @@ -106,6 +120,21 @@ STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) { void asm_thumb_entry(asm_thumb_t *as, int num_locals) { assert(num_locals >= 0); + // If this Thumb machine code is run from ARM state then add a prelude + // to switch to Thumb state for the duration of the function. + #if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__)) + #if MICROPY_DYNAMIC_COMPILER + if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_ARMV6) + #endif + { + asm_thumb_op32(as, 0x4010, 0xe92d); // push {r4, lr} + asm_thumb_op32(as, 0xe009, 0xe28f); // add lr, pc, 8 + 1 + asm_thumb_op32(as, 0xff3e, 0xe12f); // blx lr + asm_thumb_op32(as, 0x4010, 0xe8bd); // pop {r4, lr} + asm_thumb_op32(as, 0xff1e, 0xe12f); // bx lr + } + #endif + // work out what to push and how many extra spaces to reserve on stack // so that we have enough for all locals and it's aligned an 8-byte boundary // we push extra regs (r1, r2, r3) to help do the stack adjustment @@ -142,7 +171,11 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) { } asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist)); if (stack_adjust > 0) { - asm_thumb_op16(as, OP_SUB_SP(stack_adjust)); + if (UNSIGNED_FIT7(stack_adjust)) { + asm_thumb_op16(as, OP_SUB_SP(stack_adjust)); + } else { + asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4)); + } } as->push_reglist = reglist; as->stack_adjust = stack_adjust; @@ -150,7 +183,11 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) { void asm_thumb_exit(asm_thumb_t *as) { if (as->stack_adjust > 0) { - asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust)); + if (UNSIGNED_FIT7(as->stack_adjust)) { + asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust)); + } else { + asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4)); + } } asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist)); } @@ -205,10 +242,12 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) { } // if loading lo half with movw, the i16 value will be zero extended into the r32 register! 
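// [Editor's note -- illustrative sketch, not part of the upstream patch.]
// asm_thumb_mov_reg_i16() below emits one half of the movw/movt pair used
// to build an arbitrary 32-bit constant, e.g. loading 0x20001234 into r0:
//     movw r0, #0x1234   @ r0 = 0x00001234 (low half, zero-extended)
//     movt r0, #0x2000   @ r0 = 0x20001234 (high half into bits 31:16)
// The function now returns the code position of the instruction (size_t
// loc) so a caller can record where the immediate lives and patch it after
// assembly; the new ASM_MOV_REG_IMM_FIX_U16/_FIX_WORD macros expose exactly
// this to the native emitter.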
-void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) { +size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) { assert(reg_dest < ASM_THUMB_REG_R15); + size_t loc = mp_asm_base_get_code_pos(&as->base); // mov[wt] reg_dest, #i16_src asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff)); + return loc; } #define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff)) @@ -251,12 +290,16 @@ bool asm_thumb_bl_label(asm_thumb_t *as, uint label) { return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel); } -void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) { +size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) { // movw, movt does it in 8 bytes // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw + size_t loc = mp_asm_base_get_code_pos(&as->base); + asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32); asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16); + + return loc; } void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) { @@ -269,21 +312,6 @@ void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) { } } -// i32 is stored as a full word in the code, and aligned to machine-word boundary -// TODO this is very inefficient, improve it! -void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) { - // align on machine-word + 2 - if ((as->base.code_offset & 3) == 0) { - asm_thumb_op16(as, ASM_THUMB_OP_NOP); - } - // jump over the i32 value (instruction prefetch adds 2 to PC) - asm_thumb_op16(as, OP_B_N(2)); - // store i32 on machine-word aligned boundary - mp_asm_base_data(&as->base, 4, i32); - // do the actual load of the i32 value - asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32); -} - #define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff)) #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff)) @@ -310,6 +338,27 @@ void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset)); } +void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label) { + mp_uint_t dest = get_label_dest(as, label); + mp_int_t rel = dest - as->base.code_offset; + rel -= 4 + 4; // adjust for mov_reg_i16 and then PC+4 prefetch of add_reg_reg + rel |= 1; // to stay in Thumb state when jumping to this address + asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes + asm_thumb_add_reg_reg(as, rlo_dest, ASM_THUMB_REG_R15); // 2 bytes +} + +static inline void asm_thumb_ldr_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) { + asm_thumb_op32(as, OP_LDR_W_HI(reg_base), OP_LDR_W_LO(reg_dest, word_offset * 4)); +} + +void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) { + if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(word_offset)) { + asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_base, word_offset); + } else { + asm_thumb_ldr_reg_reg_i12(as, reg_dest, reg_base, word_offset); + } +} + // this could be wrong, because it should have a range of +/- 16MiB... 
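// [Editor's note -- illustrative, not part of the upstream patch.] The
// OP_BW_HI/OP_BW_LO pair below splits a signed byte offset across the two
// halfwords of a Thumb-2 B.W instruction, with the J1/J2 bits fixed to 1,
// which gives less than the architectural +/- 16 MiB range -- the caveat
// above. For example, a branch 0x1000 bytes forward encodes as:
//     hi = 0xf000 | ((0x1000 >> 12) & 0x07ff) = 0xf001
//     lo = 0xb800 | ((0x1000 >> 1)  & 0x07ff) = 0xb800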
#define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff)) #define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff)) @@ -355,25 +404,10 @@ void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) { #define OP_BLX(reg) (0x4780 | ((reg) << 3)) #define OP_SVC(arg) (0xdf00 | (arg)) -void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp) { - /* TODO make this use less bytes - uint rlo_base = ASM_THUMB_REG_R3; - uint rlo_dest = ASM_THUMB_REG_R7; - uint word_offset = 4; - asm_thumb_op16(as, 0x0000); - asm_thumb_op16(as, 0x6800 | (word_offset << 6) | (rlo_base << 3) | rlo_dest); // ldr rlo_dest, [rlo_base, #offset] - asm_thumb_op16(as, 0x4780 | (ASM_THUMB_REG_R9 << 3)); // blx reg - */ - - if (fun_id < 32) { - // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes - asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, ASM_THUMB_REG_R7, fun_id)); - asm_thumb_op16(as, OP_BLX(reg_temp)); - } else { - // load ptr to function into register using immediate; 6 bytes - asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr); - asm_thumb_op16(as, OP_BLX(reg_temp)); - } +void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp) { + // Load ptr to function from table, indexed by fun_id, then call it + asm_thumb_ldr_reg_reg_i12_optimised(as, reg_temp, ASM_THUMB_REG_FUN_TABLE, fun_id); + asm_thumb_op16(as, OP_BLX(reg_temp)); } #endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB diff --git a/python/src/py/asmthumb.h b/python/src/py/asmthumb.h index 8a7df5d50..c21c23ff7 100644 --- a/python/src/py/asmthumb.h +++ b/python/src/py/asmthumb.h @@ -46,6 +46,7 @@ #define ASM_THUMB_REG_R13 (13) #define ASM_THUMB_REG_R14 (14) #define ASM_THUMB_REG_R15 (15) +#define ASM_THUMB_REG_SP (ASM_THUMB_REG_R13) #define ASM_THUMB_REG_LR (REG_R14) #define ASM_THUMB_CC_EQ (0x0) @@ -180,6 +181,26 @@ void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src); static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) { asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src); } +// FORMAT 5: hi register operations (add, cmp, mov, bx) +// For add/cmp/mov, at least one of the args must be a high register + +#define ASM_THUMB_FORMAT_5_ADD (0x4400) +#define ASM_THUMB_FORMAT_5_BX (0x4700) + +#define ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src) \ + ((op) | ((r_dest) << 4 & 0x0080) | ((r_src) << 3) | ((r_dest) & 0x0007)) + +static inline void asm_thumb_format_5(asm_thumb_t *as, uint op, uint r_dest, uint r_src) { + asm_thumb_op16(as, ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src)); +} + +static inline void asm_thumb_add_reg_reg(asm_thumb_t *as, uint r_dest, uint r_src) { + asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_ADD, r_dest, r_src); +} +static inline void asm_thumb_bx_reg(asm_thumb_t *as, uint r_src) { + asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_BX, 0, r_src); +} + // FORMAT 9: load/store with immediate offset // For word transfers the offset must be aligned, and >>2 @@ -220,23 +241,28 @@ static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uin #define ASM_THUMB_OP_MOVT (0xf2c0) void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src); -void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src); +size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src); // these return true if the destination is in range, false otherwise 
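// [Editor's note -- illustrative commentary, not part of the upstream
// patch.] The in-range predicates below exist because the assembler runs
// multiple passes: on the sizing passes label offsets are not yet final, so
// they optimistically return true; only on MP_ASM_PASS_EMIT does false
// signal a genuinely out-of-range destination, letting callers such as
// asm_thumb_b_label() fall back to a wide 32-bit encoding.
// Note also the new calling scheme visible in asm_thumb_bl_ind() below:
// instead of embedding an absolute function pointer in the code, r7
// (ASM_THUMB_REG_FUN_TABLE) holds &mp_fun_table and runtime helpers are
// reached by index -- conceptually:
//     helper = ((void (**)(void))fun_table)[fun_id]; // ldr rT, [r7, #id*4]
//     helper();                                      // blx rT
// which is what keeps the emitted native code position-independent.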
bool asm_thumb_b_n_label(asm_thumb_t *as, uint label); bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide); bool asm_thumb_bl_label(asm_thumb_t *as, uint label); -void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience +size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience -void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32); // convenience void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience +void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label); + +void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint byte_offset); // convenience void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch -void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp); // convenience +void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenience + +// Holds a pointer to mp_fun_table +#define ASM_THUMB_REG_FUN_TABLE ASM_THUMB_REG_R7 #if GENERIC_ASM_API @@ -261,18 +287,20 @@ void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp #define REG_LOCAL_3 ASM_THUMB_REG_R6 #define REG_LOCAL_NUM (3) +#define REG_FUN_TABLE ASM_THUMB_REG_FUN_TABLE + #define ASM_T asm_thumb_t #define ASM_END_PASS asm_thumb_end_pass #define ASM_ENTRY asm_thumb_entry #define ASM_EXIT asm_thumb_exit #define ASM_JUMP asm_thumb_b_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \ do { \ asm_thumb_cmp_rlo_i8(as, reg, 0); \ asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \ do { \ asm_thumb_cmp_rlo_i8(as, reg, 0); \ asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \ @@ -282,14 +310,17 @@ void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \ asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \ } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, ASM_THUMB_REG_R3) +#define ASM_JUMP_REG(as, reg) asm_thumb_bx_reg((as), (reg)) +#define ASM_CALL_IND(as, idx) asm_thumb_bl_ind(as, idx, ASM_THUMB_REG_R3) #define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg)) #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm)) -#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_aligned((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_reg_i16((as), ASM_THUMB_OP_MOVW, (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_thumb_mov_reg_i32((as), (reg_dest), (imm)) #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num)) #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src)) #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, 
local_num) asm_thumb_mov_reg_local_addr((as), (reg_dest), (local_num)) +#define ASM_MOV_REG_PCREL(as, rlo_dest, label) asm_thumb_mov_reg_pcrel((as), (rlo_dest), (label)) #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift)) #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift)) @@ -301,7 +332,7 @@ void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp #define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src)) #define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) -#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), (word_offset)) +#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (word_offset)) #define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) #define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) #define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0) diff --git a/python/src/py/asmx64.c b/python/src/py/asmx64.c index c900a08d1..b18703a9c 100644 --- a/python/src/py/asmx64.c +++ b/python/src/py/asmx64.c @@ -73,8 +73,10 @@ #define OPCODE_CMP_R64_WITH_RM64 (0x39) /* /r */ //#define OPCODE_CMP_RM32_WITH_R32 (0x3b) #define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */ +#define OPCODE_TEST_R64_WITH_RM64 (0x85) /* /r */ #define OPCODE_JMP_REL8 (0xeb) #define OPCODE_JMP_REL32 (0xe9) +#define OPCODE_JMP_RM64 (0xff) /* /4 */ #define OPCODE_JCC_REL8 (0x70) /* | jcc type */ #define OPCODE_JCC_REL32_A (0x0f) #define OPCODE_JCC_REL32_B (0x80) /* | jcc type */ @@ -181,21 +183,22 @@ STATIC void asm_x64_write_word32_to(asm_x64_t *as, int offset, int w32) { */ STATIC void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int disp_offset) { - assert(disp_r64 != ASM_X64_REG_RSP); - - if (disp_r64 == ASM_X64_REG_R12) { - // special case for r12; not fully implemented - assert(SIGNED_FIT8(disp_offset)); - asm_x64_write_byte_3(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), 0x24, IMM32_L0(disp_offset)); - return; - } - - if (disp_offset == 0 && disp_r64 != ASM_X64_REG_RBP) { - asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP0 | MODRM_RM_R64(disp_r64)); + uint8_t rm_disp; + if (disp_offset == 0 && (disp_r64 & 7) != ASM_X64_REG_RBP) { + rm_disp = MODRM_RM_DISP0; } else if (SIGNED_FIT8(disp_offset)) { - asm_x64_write_byte_2(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), IMM32_L0(disp_offset)); + rm_disp = MODRM_RM_DISP8; } else { - asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP32 | MODRM_RM_R64(disp_r64)); + rm_disp = MODRM_RM_DISP32; + } + asm_x64_write_byte_1(as, MODRM_R64(r64) | rm_disp | MODRM_RM_R64(disp_r64)); + if ((disp_r64 & 7) == ASM_X64_REG_RSP) { + // Special case for rsp and r12, they need a SIB byte + asm_x64_write_byte_1(as, 0x24); + } + if (rm_disp == MODRM_RM_DISP8) { + asm_x64_write_byte_1(as, IMM32_L0(disp_offset)); + } else if (rm_disp == MODRM_RM_DISP32) { asm_x64_write_word32(as, disp_offset); } } @@ -331,14 +334,16 @@ void asm_x64_mov_i8_to_r8(asm_x64_t *as, int src_i8, int dest_r64) { } */ -STATIC void asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, 
int dest_r64) { +size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64) { // cpu defaults to i32 to r64, with zero extension if (dest_r64 < 8) { asm_x64_write_byte_1(as, OPCODE_MOV_I64_TO_R64 | dest_r64); } else { asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7)); } + size_t loc = mp_asm_base_get_code_pos(&as->base); asm_x64_write_word32(as, src_i32); + return loc; } void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) { @@ -361,15 +366,6 @@ void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r } } -// src_i64 is stored as a full word in the code, and aligned to machine-word boundary -void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64) { - // mov instruction uses 2 bytes for the instruction, before the i64 - while (((as->base.code_offset + 2) & (WORD_SIZE - 1)) != 0) { - asm_x64_nop(as); - } - asm_x64_mov_i64_to_r64(as, src_i64, dest_r64); -} - void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) { asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64); } @@ -465,17 +461,25 @@ void asm_x64_cmp_i32_with_r32(asm_x64_t *as, int src_i32, int src_r32) { */ void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b) { - // TODO implement for other registers - assert(src_r64_a == ASM_X64_REG_RAX); - assert(src_r64_b == ASM_X64_REG_RAX); + assert(src_r64_a < 8); + assert(src_r64_b < 8); asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b)); } +void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) { + asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_TEST_R64_WITH_RM64); +} + void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) { assert(dest_r8 < 8); asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8)); } +void asm_x64_jmp_reg(asm_x64_t *as, int src_r64) { + assert(src_r64 < 8); + asm_x64_write_byte_2(as, OPCODE_JMP_RM64, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(src_r64)); +} + STATIC mp_uint_t get_label_dest(asm_x64_t *as, mp_uint_t label) { assert(label < as->base.max_num_labels); return as->base.label_offsets[label]; @@ -528,63 +532,72 @@ void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) { void asm_x64_entry(asm_x64_t *as, int num_locals) { assert(num_locals >= 0); asm_x64_push_r64(as, ASM_X64_REG_RBP); - asm_x64_mov_r64_r64(as, ASM_X64_REG_RBP, ASM_X64_REG_RSP); - num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary - asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, num_locals * WORD_SIZE); asm_x64_push_r64(as, ASM_X64_REG_RBX); asm_x64_push_r64(as, ASM_X64_REG_R12); asm_x64_push_r64(as, ASM_X64_REG_R13); + num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary + asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, num_locals * WORD_SIZE); as->num_locals = num_locals; } void asm_x64_exit(asm_x64_t *as) { + asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, -as->num_locals * WORD_SIZE); asm_x64_pop_r64(as, ASM_X64_REG_R13); asm_x64_pop_r64(as, ASM_X64_REG_R12); asm_x64_pop_r64(as, ASM_X64_REG_RBX); - asm_x64_write_byte_1(as, OPCODE_LEAVE); + asm_x64_pop_r64(as, ASM_X64_REG_RBP); asm_x64_ret(as); } // locals: // - stored on the stack in ascending order // - numbered 0 through as->num_locals-1 -// - RBP points above the last local +// - RSP points to the first local // -// | RBP -// v +// | RSP +// v // l0 l1 l2 ... 
l(n-1) // ^ ^ // | low address | high address in RAM // -STATIC int asm_x64_local_offset_from_ebp(asm_x64_t *as, int local_num) { - return (-as->num_locals + local_num) * WORD_SIZE; +STATIC int asm_x64_local_offset_from_rsp(asm_x64_t *as, int local_num) { + (void)as; + // Stack is full descending, RSP points to local0 + return local_num * WORD_SIZE; } void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) { - asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64); + asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, src_local_num), dest_r64); } void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) { - asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num)); + asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, dest_local_num)); } void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) { - int offset = asm_x64_local_offset_from_ebp(as, local_num); + int offset = asm_x64_local_offset_from_rsp(as, local_num); if (offset == 0) { - asm_x64_mov_r64_r64(as, dest_r64, ASM_X64_REG_RBP); + asm_x64_mov_r64_r64(as, dest_r64, ASM_X64_REG_RSP); } else { - asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RBP, offset, dest_r64); + asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RSP, offset, dest_r64); } } +void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label) { + mp_uint_t dest = get_label_dest(as, label); + mp_int_t rel = dest - (as->base.code_offset + 7); + asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64), OPCODE_LEA_MEM_TO_R64, MODRM_R64(dest_r64) | MODRM_RM_R64(5)); + asm_x64_write_word32(as, rel); +} + /* void asm_x64_push_local(asm_x64_t *as, int local_num) { - asm_x64_push_disp(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, local_num)); + asm_x64_push_disp(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, local_num)); } void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64) { - asm_x64_mov_r64_r64(as, temp_r64, ASM_X64_REG_RBP); - asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_ebp(as, local_num), temp_r64); + asm_x64_mov_r64_r64(as, temp_r64, ASM_X64_REG_RSP); + asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_rsp(as, local_num), temp_r64); asm_x64_push_r64(as, temp_r64); } */ @@ -610,21 +623,10 @@ void asm_x64_call_i1(asm_x64_t *as, void* func, int i1) { } */ -void asm_x64_call_ind(asm_x64_t *as, void *ptr, int temp_r64) { +void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r64) { assert(temp_r64 < 8); -#ifdef __LP64__ - asm_x64_mov_i64_to_r64_optimised(as, (int64_t)ptr, temp_r64); -#else - // If we get here, sizeof(int) == sizeof(void*). 
- asm_x64_mov_i64_to_r64_optimised(as, (int64_t)(unsigned int)ptr, temp_r64); -#endif + asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r64); asm_x64_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R64(2) | MODRM_RM_REG | MODRM_RM_R64(temp_r64)); - // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all - // doesn't work anymore because calls are 64 bits away - /* - asm_x64_write_byte_1(as, OPCODE_CALL_REL32); - asm_x64_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4)); - */ } #endif // MICROPY_EMIT_X64 diff --git a/python/src/py/asmx64.h b/python/src/py/asmx64.h index 2fbbfa9ff..d3761b78f 100644 --- a/python/src/py/asmx64.h +++ b/python/src/py/asmx64.h @@ -83,9 +83,9 @@ void asm_x64_nop(asm_x64_t* as); void asm_x64_push_r64(asm_x64_t* as, int src_r64); void asm_x64_pop_r64(asm_x64_t* as, int dest_r64); void asm_x64_mov_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); +size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64); void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64); void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64); -void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64); void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp); @@ -104,7 +104,9 @@ void asm_x64_sub_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); void asm_x64_mul_r64_r64(asm_x64_t* as, int dest_r64, int src_r64); void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b); void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b); +void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b); void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8); +void asm_x64_jmp_reg(asm_x64_t *as, int src_r64); void asm_x64_jmp_label(asm_x64_t* as, mp_uint_t label); void asm_x64_jcc_label(asm_x64_t* as, int jcc_type, mp_uint_t label); void asm_x64_entry(asm_x64_t* as, int num_locals); @@ -112,7 +114,11 @@ void asm_x64_exit(asm_x64_t* as); void asm_x64_mov_local_to_r64(asm_x64_t* as, int src_local_num, int dest_r64); void asm_x64_mov_r64_to_local(asm_x64_t* as, int src_r64, int dest_local_num); void asm_x64_mov_local_addr_to_r64(asm_x64_t* as, int local_num, int dest_r64); -void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32); +void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label); +void asm_x64_call_ind(asm_x64_t* as, size_t fun_id, int temp_r32); + +// Holds a pointer to mp_fun_table +#define ASM_X64_REG_FUN_TABLE ASM_X64_REG_RBP #if GENERIC_ASM_API @@ -139,20 +145,31 @@ void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32); #define REG_LOCAL_3 ASM_X64_REG_R13 #define REG_LOCAL_NUM (3) +// Holds a pointer to mp_fun_table +#define REG_FUN_TABLE ASM_X64_REG_FUN_TABLE + #define ASM_T asm_x64_t #define ASM_END_PASS asm_x64_end_pass #define ASM_ENTRY asm_x64_entry #define ASM_EXIT asm_x64_exit #define ASM_JUMP asm_x64_jmp_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \ do { \ - asm_x64_test_r8_with_r8(as, reg, reg); \ + if (bool_test) { \ + asm_x64_test_r8_with_r8((as), (reg), (reg)); \ + } else { \ + asm_x64_test_r64_with_r64((as), (reg), (reg)); \ + } \ asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \ } 
while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \ do { \ - asm_x64_test_r8_with_r8(as, reg, reg); \ + if (bool_test) { \ + asm_x64_test_r8_with_r8((as), (reg), (reg)); \ + } else { \ + asm_x64_test_r64_with_r64((as), (reg), (reg)); \ + } \ asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \ } while (0) #define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ @@ -160,14 +177,17 @@ void asm_x64_call_ind(asm_x64_t* as, void* ptr, int temp_r32); asm_x64_cmp_r64_with_r64(as, reg1, reg2); \ asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \ } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, ASM_X64_REG_RAX) +#define ASM_JUMP_REG(as, reg) asm_x64_jmp_reg((as), (reg)) +#define ASM_CALL_IND(as, idx) asm_x64_call_ind(as, idx, ASM_X64_REG_RAX) #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x64_mov_r64_to_local((as), (reg_src), (local_num)) #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_optimised((as), (imm), (reg_dest)) -#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_aligned((as), (imm), (reg_dest)) +#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_x64_mov_i32_to_r64((as), (imm), (reg_dest)) +#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_x64_mov_i32_to_r64((as), (imm), (reg_dest)) #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x64_mov_local_to_r64((as), (local_num), (reg_dest)) #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src)) #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x64_mov_local_addr_to_r64((as), (local_num), (reg_dest)) +#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label)) #define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg)) #define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg)) diff --git a/python/src/py/asmx86.c b/python/src/py/asmx86.c index 3938baaac..23160c9c2 100644 --- a/python/src/py/asmx86.c +++ b/python/src/py/asmx86.c @@ -73,8 +73,10 @@ #define OPCODE_CMP_R32_WITH_RM32 (0x39) //#define OPCODE_CMP_RM32_WITH_R32 (0x3b) #define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */ +#define OPCODE_TEST_R32_WITH_RM32 (0x85) /* /r */ #define OPCODE_JMP_REL8 (0xeb) #define OPCODE_JMP_REL32 (0xe9) +#define OPCODE_JMP_RM32 (0xff) /* /4 */ #define OPCODE_JCC_REL8 (0x70) /* | jcc type */ #define OPCODE_JCC_REL32_A (0x0f) #define OPCODE_JCC_REL32_B (0x80) /* | jcc type */ @@ -135,14 +137,22 @@ STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) { } STATIC void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) { - assert(disp_r32 != ASM_X86_REG_ESP); - + uint8_t rm_disp; if (disp_offset == 0 && disp_r32 != ASM_X86_REG_EBP) { - asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP0 | MODRM_RM_R32(disp_r32)); + rm_disp = MODRM_RM_DISP0; } else if (SIGNED_FIT8(disp_offset)) { - asm_x86_write_byte_2(as, MODRM_R32(r32) | MODRM_RM_DISP8 | MODRM_RM_R32(disp_r32), IMM32_L0(disp_offset)); + rm_disp = MODRM_RM_DISP8; } else { - asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP32 | MODRM_RM_R32(disp_r32)); + rm_disp = MODRM_RM_DISP32; + } + asm_x86_write_byte_1(as, MODRM_R32(r32) | rm_disp | MODRM_RM_R32(disp_r32)); + if (disp_r32 == ASM_X86_REG_ESP) { + // Special case for esp, it needs a SIB byte + asm_x86_write_byte_1(as, 0x24); + } + if (rm_disp == MODRM_RM_DISP8) { + asm_x86_write_byte_1(as, IMM32_L0(disp_offset)); + } else if (rm_disp == MODRM_RM_DISP32) { asm_x86_write_word32(as, disp_offset); } } @@ 
-151,9 +161,11 @@ STATIC void asm_x86_generic_r32_r32(asm_x86_t *as, int dest_r32, int src_r32, in asm_x86_write_byte_2(as, op, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32)); } +#if 0 STATIC void asm_x86_nop(asm_x86_t *as) { asm_x86_write_byte_1(as, OPCODE_NOP); } +#endif STATIC void asm_x86_push_r32(asm_x86_t *as, int src_r32) { asm_x86_write_byte_1(as, OPCODE_PUSH_R32 | src_r32); @@ -224,18 +236,11 @@ void asm_x86_mov_i8_to_r8(asm_x86_t *as, int src_i8, int dest_r32) { } #endif -void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) { +size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) { asm_x86_write_byte_1(as, OPCODE_MOV_I32_TO_R32 | dest_r32); + size_t loc = mp_asm_base_get_code_pos(&as->base); asm_x86_write_word32(as, src_i32); -} - -// src_i32 is stored as a full word in the code, and aligned to machine-word boundary -void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) { - // mov instruction uses 1 byte for the instruction, before the i32 - while (((as->base.code_offset + 1) & (WORD_SIZE - 1)) != 0) { - asm_x86_nop(as); - } - asm_x86_mov_i32_to_r32(as, src_i32, dest_r32); + return loc; } void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) { @@ -312,7 +317,7 @@ void asm_x86_sar_r32_by_imm(asm_x86_t *as, int r32, int imm) { #endif void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) { - asm_x86_write_byte_2(as, OPCODE_CMP_R32_WITH_RM32, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b)); + asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_CMP_R32_WITH_RM32); } #if 0 @@ -328,16 +333,21 @@ void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) { #endif void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) { - // TODO implement for other registers - assert(src_r32_a == ASM_X86_REG_EAX); - assert(src_r32_b == ASM_X86_REG_EAX); asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b)); } +void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) { + asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_TEST_R32_WITH_RM32); +} + void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) { asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8)); } +void asm_x86_jmp_reg(asm_x86_t *as, int src_r32) { + asm_x86_write_byte_2(as, OPCODE_JMP_RM32, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(src_r32)); +} + STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) { assert(label < as->base.max_num_labels); return as->base.label_offsets[label]; @@ -390,87 +400,103 @@ void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) { void asm_x86_entry(asm_x86_t *as, int num_locals) { assert(num_locals >= 0); asm_x86_push_r32(as, ASM_X86_REG_EBP); - asm_x86_mov_r32_r32(as, ASM_X86_REG_EBP, ASM_X86_REG_ESP); - if (num_locals > 0) { - asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE); - } asm_x86_push_r32(as, ASM_X86_REG_EBX); asm_x86_push_r32(as, ASM_X86_REG_ESI); asm_x86_push_r32(as, ASM_X86_REG_EDI); - // TODO align stack on 16-byte boundary + num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary + asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE); as->num_locals = num_locals; } void asm_x86_exit(asm_x86_t *as) { + asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, -as->num_locals * WORD_SIZE); asm_x86_pop_r32(as, 
ASM_X86_REG_EDI); asm_x86_pop_r32(as, ASM_X86_REG_ESI); asm_x86_pop_r32(as, ASM_X86_REG_EBX); - asm_x86_write_byte_1(as, OPCODE_LEAVE); + asm_x86_pop_r32(as, ASM_X86_REG_EBP); asm_x86_ret(as); } +STATIC int asm_x86_arg_offset_from_esp(asm_x86_t *as, size_t arg_num) { + // Above esp are: locals, 4 saved registers, return eip, arguments + return (as->num_locals + 4 + 1 + arg_num) * WORD_SIZE; +} + #if 0 void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) { - asm_x86_push_disp(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE); + asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num)); } #endif void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) { - asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32); + asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num), dest_r32); } #if 0 void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) { - asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE); + asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, dest_arg_num)); } #endif // locals: // - stored on the stack in ascending order // - numbered 0 through as->num_locals-1 -// - EBP points above the last local +// - ESP points to the first local // -// | EBP -// v +// | ESP +// v // l0 l1 l2 ... l(n-1) // ^ ^ // | low address | high address in RAM // -STATIC int asm_x86_local_offset_from_ebp(asm_x86_t *as, int local_num) { - return (-as->num_locals + local_num) * WORD_SIZE; +STATIC int asm_x86_local_offset_from_esp(asm_x86_t *as, int local_num) { + (void)as; + // Stack is full descending, ESP points to local0 + return local_num * WORD_SIZE; } void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) { - asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32); + asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, src_local_num), dest_r32); } void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) { - asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num)); + asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, dest_local_num)); } void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) { - int offset = asm_x86_local_offset_from_ebp(as, local_num); + int offset = asm_x86_local_offset_from_esp(as, local_num); if (offset == 0) { - asm_x86_mov_r32_r32(as, dest_r32, ASM_X86_REG_EBP); + asm_x86_mov_r32_r32(as, dest_r32, ASM_X86_REG_ESP); } else { - asm_x86_lea_disp_to_r32(as, ASM_X86_REG_EBP, offset, dest_r32); + asm_x86_lea_disp_to_r32(as, ASM_X86_REG_ESP, offset, dest_r32); } } +void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r32, mp_uint_t label) { + asm_x86_write_byte_1(as, OPCODE_CALL_REL32); + asm_x86_write_word32(as, 0); + mp_uint_t dest = get_label_dest(as, label); + mp_int_t rel = dest - as->base.code_offset; + asm_x86_pop_r32(as, dest_r32); + // PC rel is usually a forward reference, so need to assume it's large + asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32)); + asm_x86_write_word32(as, rel); +} + #if 0 void asm_x86_push_local(asm_x86_t *as, int local_num) { - asm_x86_push_disp(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, local_num)); + asm_x86_push_disp(as, 
ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, local_num)); } void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32) { - asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_EBP); - asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_ebp(as, local_num), temp_r32); + asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_ESP); + asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_esp(as, local_num), temp_r32); asm_x86_push_r32(as, temp_r32); } #endif -void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32) { +void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32) { // TODO align stack on 16-byte boundary before the call assert(n_args <= 5); if (n_args > 4) { @@ -488,20 +514,10 @@ void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32) if (n_args > 0) { asm_x86_push_r32(as, ASM_X86_REG_ARG_1); } -#ifdef __LP64__ - // We wouldn't run x86 code on an x64 machine. This is here to enable - // testing of the x86 emitter only. - asm_x86_mov_i32_to_r32(as, (int32_t)(int64_t)ptr, temp_r32); -#else - // If we get here, sizeof(int) == sizeof(void*). - asm_x86_mov_i32_to_r32(as, (int32_t)ptr, temp_r32); -#endif + + // Load the pointer to the function and make the call + asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r32); asm_x86_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R32(2) | MODRM_RM_REG | MODRM_RM_R32(temp_r32)); - // this reduces code size by 2 bytes per call, but doesn't seem to speed it up at all - /* - asm_x86_write_byte_1(as, OPCODE_CALL_REL32); - asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->base.code_offset + 4)); - */ // the caller must clean up the stack if (n_args > 0) { diff --git a/python/src/py/asmx86.h b/python/src/py/asmx86.h index 09559850c..7ba677b2c 100644 --- a/python/src/py/asmx86.h +++ b/python/src/py/asmx86.h @@ -83,8 +83,7 @@ static inline void asm_x86_end_pass(asm_x86_t *as) { } void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); -void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32); -void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32); +size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32); void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp); @@ -101,7 +100,9 @@ void asm_x86_sub_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); void asm_x86_mul_r32_r32(asm_x86_t* as, int dest_r32, int src_r32); void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b); void asm_x86_test_r8_with_r8(asm_x86_t* as, int src_r32_a, int src_r32_b); +void asm_x86_test_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b); void asm_x86_setcc_r8(asm_x86_t* as, mp_uint_t jcc_type, int dest_r8); +void asm_x86_jmp_reg(asm_x86_t *as, int src_r86); void asm_x86_jmp_label(asm_x86_t* as, mp_uint_t label); void asm_x86_jcc_label(asm_x86_t* as, mp_uint_t jcc_type, mp_uint_t label); void asm_x86_entry(asm_x86_t* as, int num_locals); @@ -110,7 +111,11 @@ void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32); void asm_x86_mov_local_to_r32(asm_x86_t* as, int src_local_num, int dest_r32); void asm_x86_mov_r32_to_local(asm_x86_t* as, int src_r32, int dest_local_num); void asm_x86_mov_local_addr_to_r32(asm_x86_t* as, int local_num, int dest_r32); -void 
asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32); +void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r64, mp_uint_t label); +void asm_x86_call_ind(asm_x86_t* as, size_t fun_id, mp_uint_t n_args, int temp_r32); + +// Holds a pointer to mp_fun_table +#define ASM_X86_REG_FUN_TABLE ASM_X86_REG_EBP #if GENERIC_ASM_API @@ -137,20 +142,31 @@ void asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32); #define REG_LOCAL_3 ASM_X86_REG_EDI #define REG_LOCAL_NUM (3) +// Holds a pointer to mp_fun_table +#define REG_FUN_TABLE ASM_X86_REG_FUN_TABLE + #define ASM_T asm_x86_t #define ASM_END_PASS asm_x86_end_pass #define ASM_ENTRY asm_x86_entry #define ASM_EXIT asm_x86_exit #define ASM_JUMP asm_x86_jmp_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \ do { \ - asm_x86_test_r8_with_r8(as, reg, reg); \ + if (bool_test) { \ + asm_x86_test_r8_with_r8(as, reg, reg); \ + } else { \ + asm_x86_test_r32_with_r32(as, reg, reg); \ + } \ asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \ } while (0) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \ do { \ - asm_x86_test_r8_with_r8(as, reg, reg); \ + if (bool_test) { \ + asm_x86_test_r8_with_r8(as, reg, reg); \ + } else { \ + asm_x86_test_r32_with_r32(as, reg, reg); \ + } \ asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \ } while (0) #define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ @@ -158,14 +174,17 @@ void asm_x86_call_ind(asm_x86_t* as, void* ptr, mp_uint_t n_args, int temp_r32); asm_x86_cmp_r32_with_r32(as, reg1, reg2); \ asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \ } while (0) -#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], ASM_X86_REG_EAX) +#define ASM_JUMP_REG(as, reg) asm_x86_jmp_reg((as), (reg)) +#define ASM_CALL_IND(as, idx) asm_x86_call_ind(as, idx, mp_f_n_args[idx], ASM_X86_REG_EAX) #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x86_mov_r32_to_local((as), (reg_src), (local_num)) #define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest)) -#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32_aligned((as), (imm), (reg_dest)) +#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest)) +#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest)) #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x86_mov_local_to_r32((as), (local_num), (reg_dest)) #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src)) #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x86_mov_local_addr_to_r32((as), (local_num), (reg_dest)) +#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x86_mov_reg_pcrel((as), (reg_dest), (label)) #define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg)) #define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg)) diff --git a/python/src/py/asmxtensa.c b/python/src/py/asmxtensa.c index 00448dfc5..a269e5e7f 100644 --- a/python/src/py/asmxtensa.c +++ b/python/src/py/asmxtensa.c @@ -37,6 +37,7 @@ #define WORD_SIZE (4) #define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)) #define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)) +#define NUM_REGS_SAVED (5) void asm_xtensa_end_pass(asm_xtensa_t *as) { as->num_const = as->cur_const; @@ -67,26 +68,37 @@ void asm_xtensa_entry(asm_xtensa_t *as, int 
num_locals) { mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte as->const_table = (uint32_t*)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4); - // adjust the stack-pointer to store a0, a12, a13, a14 and locals, 16-byte aligned - as->stack_adjust = (((4 + num_locals) * WORD_SIZE) + 15) & ~15; - asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust); + // adjust the stack-pointer to store a0, a12, a13, a14, a15 and locals, 16-byte aligned + as->stack_adjust = (((NUM_REGS_SAVED + num_locals) * WORD_SIZE) + 15) & ~15; + if (SIGNED_FIT8(-as->stack_adjust)) { + asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust); + } else { + asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust); + asm_xtensa_op_sub(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9); + } - // save return value (a0) and callee-save registers (a12, a13, a14) + // save return value (a0) and callee-save registers (a12, a13, a14, a15) asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); - asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A12, ASM_XTENSA_REG_A1, 1); - asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A13, ASM_XTENSA_REG_A1, 2); - asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A14, ASM_XTENSA_REG_A1, 3); + for (int i = 1; i < NUM_REGS_SAVED; ++i) { + asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i); + } } void asm_xtensa_exit(asm_xtensa_t *as) { // restore registers - asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A14, ASM_XTENSA_REG_A1, 3); - asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A13, ASM_XTENSA_REG_A1, 2); - asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A12, ASM_XTENSA_REG_A1, 1); + for (int i = NUM_REGS_SAVED - 1; i >= 1; --i) { + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i); + } asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); // restore stack-pointer and return - asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust); + if (SIGNED_FIT8(as->stack_adjust)) { + asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust); + } else { + asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust); + asm_xtensa_op_add_n(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9); + } + asm_xtensa_op_ret_n(as); } @@ -144,31 +156,74 @@ void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, ui asm_xtensa_op_movi_n(as, reg_dest, 0); } -void asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) { +size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) { + // load the constant + uint32_t const_table_offset = (uint8_t*)as->const_table - as->base.code_base; + size_t loc = const_table_offset + as->cur_const * WORD_SIZE; + asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, loc); + // store the constant in the table + if (as->const_table != NULL) { + as->const_table[as->cur_const] = i32; + } + ++as->cur_const; + return loc; +} + +void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32) { if (SIGNED_FIT12(i32)) { asm_xtensa_op_movi(as, reg_dest, i32); } else { - // load the constant - asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, 4 + as->cur_const * WORD_SIZE); - // store the constant in the table - if (as->const_table != NULL) { - as->const_table[as->cur_const] = i32; - } - ++as->cur_const; + asm_xtensa_mov_reg_i32(as, reg_dest, i32); } } void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src) { - 
asm_xtensa_op_s32i(as, reg_src, ASM_XTENSA_REG_A1, 4 + local_num); + asm_xtensa_op_s32i(as, reg_src, ASM_XTENSA_REG_A1, NUM_REGS_SAVED + local_num); } void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num) { - asm_xtensa_op_l32i(as, reg_dest, ASM_XTENSA_REG_A1, 4 + local_num); + asm_xtensa_op_l32i(as, reg_dest, ASM_XTENSA_REG_A1, NUM_REGS_SAVED + local_num); } void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num) { - asm_xtensa_op_mov_n(as, reg_dest, ASM_XTENSA_REG_A1); - asm_xtensa_op_addi(as, reg_dest, reg_dest, (4 + local_num) * WORD_SIZE); + uint off = (NUM_REGS_SAVED + local_num) * WORD_SIZE; + if (SIGNED_FIT8(off)) { + asm_xtensa_op_addi(as, reg_dest, ASM_XTENSA_REG_A1, off); + } else { + asm_xtensa_op_movi(as, reg_dest, off); + asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A1); + } +} + +void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label) { + // Get relative offset from PC + uint32_t dest = get_label_dest(as, label); + int32_t rel = dest - as->base.code_offset; + rel -= 3 + 3; // account for 3 bytes of movi instruction, 3 bytes call0 adjustment + asm_xtensa_op_movi(as, reg_dest, rel); // imm has 12-bit range + + // Use call0 to get PC+3 into a0 + // call0 destination must be aligned on 4 bytes: + // - code_offset&3=0: off=0, pad=1 + // - code_offset&3=1: off=0, pad=0 + // - code_offset&3=2: off=1, pad=3 + // - code_offset&3=3: off=1, pad=2 + uint32_t off = as->base.code_offset >> 1 & 1; + uint32_t pad = (5 - as->base.code_offset) & 3; + asm_xtensa_op_call0(as, off); + mp_asm_base_get_cur_to_write_bytes(&as->base, pad); + + // Add PC to relative offset + asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A0); +} + +void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx) { + if (idx < 16) { + asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_FUN_TABLE, idx); + } else { + asm_xtensa_op_l32i(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_FUN_TABLE, idx); + } + asm_xtensa_op_callx0(as, ASM_XTENSA_REG_A0); } #endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA diff --git a/python/src/py/asmxtensa.h b/python/src/py/asmxtensa.h index e6d4158cb..d95af14a5 100644 --- a/python/src/py/asmxtensa.h +++ b/python/src/py/asmxtensa.h @@ -26,6 +26,7 @@ #ifndef MICROPY_INCLUDED_PY_ASMXTENSA_H #define MICROPY_INCLUDED_PY_ASMXTENSA_H +#include "py/misc.h" #include "py/asmbase.h" // calling conventions: @@ -113,12 +114,12 @@ void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op); // raw instructions -static inline void asm_xtensa_op_add(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { - asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 8, reg_dest, reg_src_a, reg_src_b)); +static inline void asm_xtensa_op_add_n(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { + asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(10, reg_dest, reg_src_a, reg_src_b)); } static inline void asm_xtensa_op_addi(asm_xtensa_t *as, uint reg_dest, uint reg_src, int imm8) { - asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 12, reg_dest, reg_src, imm8 & 0xff)); + asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 12, reg_src, reg_dest, imm8 & 0xff)); } static inline void asm_xtensa_op_and(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) { @@ -133,6 +134,10 @@ static inline void asm_xtensa_op_bccz(asm_xtensa_t *as, uint cond, uint reg_src, asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, cond, 1, rel12 & 0xfff)); } +static inline void asm_xtensa_op_call0(asm_xtensa_t *as, int32_t rel18) { + 
asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(5, 0, rel18 & 0x3ffff)); +} + static inline void asm_xtensa_op_callx0(asm_xtensa_t *as, uint reg) { asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 0)); } @@ -234,10 +239,16 @@ void asm_xtensa_j_label(asm_xtensa_t *as, uint label); void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label); void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label); void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2); -void asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32); +size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32); +void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32); void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src); void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num); void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num); +void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label); +void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx); + +// Holds a pointer to mp_fun_table +#define ASM_XTENSA_REG_FUN_TABLE ASM_XTENSA_REG_A15 #if GENERIC_ASM_API @@ -262,30 +273,31 @@ void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_nu #define REG_LOCAL_3 ASM_XTENSA_REG_A14 #define REG_LOCAL_NUM (3) +#define REG_FUN_TABLE ASM_XTENSA_REG_FUN_TABLE + #define ASM_T asm_xtensa_t #define ASM_END_PASS asm_xtensa_end_pass #define ASM_ENTRY asm_xtensa_entry #define ASM_EXIT asm_xtensa_exit #define ASM_JUMP asm_xtensa_j_label -#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \ asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_EQ, reg, label) -#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \ +#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \ asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_NE, reg, label) #define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \ asm_xtensa_bcc_reg_reg_label(as, ASM_XTENSA_CC_EQ, reg1, reg2, label) -#define ASM_CALL_IND(as, ptr, idx) \ - do { \ - asm_xtensa_mov_reg_i32(as, ASM_XTENSA_REG_A0, (uint32_t)ptr); \ - asm_xtensa_op_callx0(as, ASM_XTENSA_REG_A0); \ - } while (0) +#define ASM_JUMP_REG(as, reg) asm_xtensa_op_jx((as), (reg)) +#define ASM_CALL_IND(as, idx) asm_xtensa_call_ind((as), (idx)) #define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_xtensa_mov_local_reg((as), (local_num), (reg_src)) -#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm)) -#define ASM_MOV_REG_ALIGNED_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32_optimised((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm)) +#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm)) #define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_xtensa_mov_reg_local((as), (reg_dest), (local_num)) #define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mov_n((as), (reg_dest), (reg_src)) #define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_xtensa_mov_reg_local_addr((as), (reg_dest), (local_num)) +#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_xtensa_mov_reg_pcrel((as), (reg_dest), (label)) #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) \ do { \ @@ -300,7 
+312,7 @@ void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_nu #define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_or((as), (reg_dest), (reg_dest), (reg_src)) #define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_xor((as), (reg_dest), (reg_dest), (reg_src)) #define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_and((as), (reg_dest), (reg_dest), (reg_src)) -#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_add((as), (reg_dest), (reg_dest), (reg_src)) +#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_add_n((as), (reg_dest), (reg_dest), (reg_src)) #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_sub((as), (reg_dest), (reg_dest), (reg_src)) #define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mull((as), (reg_dest), (reg_dest), (reg_src)) diff --git a/python/src/py/bc.c b/python/src/py/bc.c index 89d8b74f9..b178e7c20 100644 --- a/python/src/py/bc.c +++ b/python/src/py/bc.c @@ -292,7 +292,7 @@ continue2:; // MP_BC_MAKE_CLOSURE_DEFARGS // MP_BC_RAISE_VARARGS // There are 4 special opcodes that have an extra byte only when -// MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE is enabled: +// MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE is enabled (and they take a qstr): // MP_BC_LOAD_NAME // MP_BC_LOAD_GLOBAL // MP_BC_LOAD_ATTR @@ -321,7 +321,7 @@ STATIC const byte opcode_format_table[64] = { OC4(O, O, U, U), // 0x38-0x3b OC4(U, O, B, O), // 0x3c-0x3f OC4(O, B, B, O), // 0x40-0x43 - OC4(B, B, O, B), // 0x44-0x47 + OC4(O, U, O, B), // 0x44-0x47 OC4(U, U, U, U), // 0x48-0x4b OC4(U, U, U, U), // 0x4c-0x4f OC4(V, V, U, V), // 0x50-0x53 @@ -382,26 +382,30 @@ STATIC const byte opcode_format_table[64] = { #undef V #undef O -uint mp_opcode_format(const byte *ip, size_t *opcode_size) { +uint mp_opcode_format(const byte *ip, size_t *opcode_size, bool count_var_uint) { uint f = (opcode_format_table[*ip >> 2] >> (2 * (*ip & 3))) & 3; const byte *ip_start = ip; if (f == MP_OPCODE_QSTR) { + if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) { + if (*ip == MP_BC_LOAD_NAME + || *ip == MP_BC_LOAD_GLOBAL + || *ip == MP_BC_LOAD_ATTR + || *ip == MP_BC_STORE_ATTR) { + ip += 1; + } + } ip += 3; } else { int extra_byte = ( *ip == MP_BC_RAISE_VARARGS || *ip == MP_BC_MAKE_CLOSURE || *ip == MP_BC_MAKE_CLOSURE_DEFARGS - #if MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE - || *ip == MP_BC_LOAD_NAME - || *ip == MP_BC_LOAD_GLOBAL - || *ip == MP_BC_LOAD_ATTR - || *ip == MP_BC_STORE_ATTR - #endif ); ip += 1; if (f == MP_OPCODE_VAR_UINT) { - while ((*ip++ & 0x80) != 0) { + if (count_var_uint) { + while ((*ip++ & 0x80) != 0) { + } } } else if (f == MP_OPCODE_OFFSET) { ip += 2; diff --git a/python/src/py/bc.h b/python/src/py/bc.h index ebfdeaac1..0aadfa8a3 100644 --- a/python/src/py/bc.h +++ b/python/src/py/bc.h @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George + * Copyright (c) 2014 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -114,7 +115,7 @@ const byte *mp_bytecode_print_str(const byte *ip); #define MP_OPCODE_VAR_UINT (2) #define MP_OPCODE_OFFSET (3) -uint mp_opcode_format(const byte *ip, size_t *opcode_size); +uint mp_opcode_format(const byte *ip, size_t *opcode_size, bool count_var_uint); #endif diff --git a/python/src/py/bc0.h b/python/src/py/bc0.h index 70acfb0ca..175ee263a 100644 --- a/python/src/py/bc0.h +++ b/python/src/py/bc0.h @@ -77,8 +77,7 @@ #define MP_BC_END_FINALLY (0x41) #define MP_BC_GET_ITER (0x42) #define MP_BC_FOR_ITER (0x43) // rel byte code offset, 16-bit unsigned -#define MP_BC_POP_BLOCK (0x44) -#define MP_BC_POP_EXCEPT (0x45) +#define MP_BC_POP_EXCEPT_JUMP (0x44) // rel byte code offset, 16-bit unsigned #define MP_BC_UNWIND_JUMP (0x46) // rel byte code offset, 16-bit signed, in excess; then a byte #define MP_BC_GET_ITER_STACK (0x47) diff --git a/python/src/py/binary.c b/python/src/py/binary.c index f509ff010..a142776c3 100644 --- a/python/src/py/binary.c +++ b/python/src/py/binary.c @@ -3,7 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014-2017 Paul Sokolovsky + * Copyright (c) 2014-2019 Damien P. George * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -293,7 +294,7 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte ** #endif default: #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE - if (MP_OBJ_IS_TYPE(val_in, &mp_type_int)) { + if (mp_obj_is_type(val_in, &mp_type_int)) { mp_obj_int_to_bytes_impl(val_in, struct_type == '>', size, p); return; } else @@ -330,7 +331,7 @@ void mp_binary_set_val_array(char typecode, void *p, mp_uint_t index, mp_obj_t v break; default: #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE - if (MP_OBJ_IS_TYPE(val_in, &mp_type_int)) { + if (mp_obj_is_type(val_in, &mp_type_int)) { size_t size = mp_binary_get_size('@', typecode, NULL); mp_obj_int_to_bytes_impl(val_in, MP_ENDIANNESS_BIG, size, (uint8_t*)p + index * size); diff --git a/python/src/py/binary.h b/python/src/py/binary.h index 0dae6a29e..71182042f 100644 --- a/python/src/py/binary.h +++ b/python/src/py/binary.h @@ -3,7 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014-2017 Damien P. 
George * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal diff --git a/python/src/py/builtin.h b/python/src/py/builtin.h index 84b99a8a4..a5e0f5f2d 100644 --- a/python/src/py/builtin.h +++ b/python/src/py/builtin.h @@ -63,7 +63,11 @@ MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_len_obj); MP_DECLARE_CONST_FUN_OBJ_0(mp_builtin_locals_obj); MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_max_obj); MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_min_obj); +#if MICROPY_PY_BUILTINS_NEXT2 +MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj); +#else MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_next_obj); +#endif MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_oct_obj); MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_ord_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj); @@ -106,6 +110,7 @@ extern const mp_obj_module_t mp_module_ujson; extern const mp_obj_module_t mp_module_ure; extern const mp_obj_module_t mp_module_uheapq; extern const mp_obj_module_t mp_module_uhashlib; +extern const mp_obj_module_t mp_module_ucryptolib; extern const mp_obj_module_t mp_module_ubinascii; extern const mp_obj_module_t mp_module_urandom; extern const mp_obj_module_t mp_module_uselect; @@ -113,7 +118,7 @@ extern const mp_obj_module_t mp_module_ussl; extern const mp_obj_module_t mp_module_utimeq; extern const mp_obj_module_t mp_module_machine; extern const mp_obj_module_t mp_module_lwip; -extern const mp_obj_module_t mp_module_websocket; +extern const mp_obj_module_t mp_module_uwebsocket; extern const mp_obj_module_t mp_module_webrepl; extern const mp_obj_module_t mp_module_framebuf; extern const mp_obj_module_t mp_module_btree; diff --git a/python/src/py/builtinevex.c b/python/src/py/builtinevex.c index 846603f46..819e3e1c6 100644 --- a/python/src/py/builtinevex.c +++ b/python/src/py/builtinevex.c @@ -52,7 +52,7 @@ STATIC mp_obj_t code_execute(mp_obj_code_t *self, mp_obj_dict_t *globals, mp_obj // a bit of a hack: fun_bc will re-set globals, so need to make sure it's // the correct one - if (MP_OBJ_IS_TYPE(self->module_fun, &mp_type_fun_bc)) { + if (mp_obj_is_type(self->module_fun, &mp_type_fun_bc)) { mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(self->module_fun); fun_bc->globals = globals; } @@ -114,7 +114,7 @@ STATIC mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_i mp_obj_dict_t *locals = mp_locals_get(); for (size_t i = 1; i < 3 && i < n_args; ++i) { if (args[i] != mp_const_none) { - if (!MP_OBJ_IS_TYPE(args[i], &mp_type_dict)) { + if (!mp_obj_is_type(args[i], &mp_type_dict)) { mp_raise_TypeError(NULL); } locals = MP_OBJ_TO_PTR(args[i]); @@ -125,7 +125,7 @@ STATIC mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_i } #if MICROPY_PY_BUILTINS_COMPILE - if (MP_OBJ_IS_TYPE(args[0], &mp_type_code)) { + if (mp_obj_is_type(args[0], &mp_type_code)) { return code_execute(MP_OBJ_TO_PTR(args[0]), globals, locals); } #endif diff --git a/python/src/py/builtinhelp.c b/python/src/py/builtinhelp.c index 6c2c3b92c..a7fede00a 100644 --- a/python/src/py/builtinhelp.c +++ b/python/src/py/builtinhelp.c @@ -58,7 +58,7 @@ STATIC void mp_help_print_info_about_object(mp_obj_t name_o, mp_obj_t value) { #if MICROPY_PY_BUILTINS_HELP_MODULES STATIC void mp_help_add_from_map(mp_obj_t list, const mp_map_t *map) { for (size_t i = 0; i < map->alloc; i++) { - if (MP_MAP_SLOT_IS_FILLED(map, i)) { + if (mp_map_slot_is_filled(map, i)) { mp_obj_list_append(list, map->table[i].key); } } @@ -123,8 +123,10 @@ STATIC void 
mp_help_print_modules(void) { mp_print_str(MP_PYTHON_PRINTER, "\n"); } + #if MICROPY_ENABLE_EXTERNAL_IMPORT // let the user know there may be other modules available from the filesystem mp_print_str(MP_PYTHON_PRINTER, "Plus any modules on the filesystem\n"); + #endif } #endif @@ -145,13 +147,13 @@ STATIC void mp_help_print_obj(const mp_obj_t obj) { mp_map_t *map = NULL; if (type == &mp_type_module) { - map = mp_obj_dict_get_map(mp_obj_module_get_globals(obj)); + map = &mp_obj_module_get_globals(obj)->map; } else { if (type == &mp_type_type) { type = MP_OBJ_TO_PTR(obj); } - if (type->locals_dict != MP_OBJ_NULL && MP_OBJ_IS_TYPE(type->locals_dict, &mp_type_dict)) { - map = mp_obj_dict_get_map(type->locals_dict); + if (type->locals_dict != NULL) { + map = &type->locals_dict->map; } } if (map != NULL) { diff --git a/python/src/py/builtinimport.c b/python/src/py/builtinimport.c index b8ed096ca..1a333b540 100644 --- a/python/src/py/builtinimport.c +++ b/python/src/py/builtinimport.c @@ -131,7 +131,7 @@ STATIC mp_import_stat_t find_file(const char *file_str, uint file_len, vstr_t *d #endif } -#if MICROPY_ENABLE_COMPILER +#if MICROPY_MODULE_FROZEN_STR || MICROPY_ENABLE_COMPILER STATIC void do_load_from_lexer(mp_obj_t module_obj, mp_lexer_t *lex) { #if MICROPY_PY___FILE__ qstr source_name = lex->source_name; @@ -182,7 +182,7 @@ STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code) { #endif STATIC void do_load(mp_obj_t module_obj, vstr_t *file) { - #if MICROPY_MODULE_FROZEN || MICROPY_PERSISTENT_CODE_LOAD || MICROPY_ENABLE_COMPILER + #if MICROPY_MODULE_FROZEN || MICROPY_ENABLE_COMPILER || (MICROPY_PERSISTENT_CODE_LOAD && MICROPY_HAS_FILE_READER) char *file_str = vstr_null_terminated_str(file); #endif @@ -213,7 +213,7 @@ STATIC void do_load(mp_obj_t module_obj, vstr_t *file) { // If we support loading .mpy files then check if the file extension is of // the correct format and, if so, load and execute the file. - #if MICROPY_PERSISTENT_CODE_LOAD + #if MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD if (file_str[file->len - 3] == 'm') { mp_raw_code_t *raw_code = mp_raw_code_load_file(file_str); do_execute_raw_code(module_obj, raw_code); @@ -229,7 +229,6 @@ STATIC void do_load(mp_obj_t module_obj, vstr_t *file) { return; } #else - // If we get here then the file was not frozen and we can't compile scripts. mp_raise_msg(&mp_type_ImportError, "script compilation not supported"); #endif diff --git a/python/src/py/compile.c b/python/src/py/compile.c index 9200b346b..27b706c8f 100644 --- a/python/src/py/compile.c +++ b/python/src/py/compile.c @@ -35,6 +35,7 @@ #include "py/compile.h" #include "py/runtime.h" #include "py/asmbase.h" +#include "py/persistentcode.h" #if MICROPY_ENABLE_COMPILER @@ -65,20 +66,38 @@ typedef enum { // we need a method table to do the lookup for the emitter functions #define EMIT(fun) (comp->emit_method_table->fun(comp->emit)) #define EMIT_ARG(fun, ...) 
(comp->emit_method_table->fun(comp->emit, __VA_ARGS__)) -#define EMIT_LOAD_FAST(qst, local_num) (comp->emit_method_table->load_id.fast(comp->emit, qst, local_num)) -#define EMIT_LOAD_GLOBAL(qst) (comp->emit_method_table->load_id.global(comp->emit, qst)) +#define EMIT_LOAD_FAST(qst, local_num) (comp->emit_method_table->load_id.local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST)) +#define EMIT_LOAD_GLOBAL(qst) (comp->emit_method_table->load_id.global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL)) #else // if we only have the bytecode emitter enabled then we can do a direct call to the functions #define EMIT(fun) (mp_emit_bc_##fun(comp->emit)) #define EMIT_ARG(fun, ...) (mp_emit_bc_##fun(comp->emit, __VA_ARGS__)) -#define EMIT_LOAD_FAST(qst, local_num) (mp_emit_bc_load_fast(comp->emit, qst, local_num)) -#define EMIT_LOAD_GLOBAL(qst) (mp_emit_bc_load_global(comp->emit, qst)) +#define EMIT_LOAD_FAST(qst, local_num) (mp_emit_bc_load_local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST)) +#define EMIT_LOAD_GLOBAL(qst) (mp_emit_bc_load_global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL)) #endif -#if MICROPY_EMIT_NATIVE +#if MICROPY_EMIT_NATIVE && MICROPY_DYNAMIC_COMPILER + +#define NATIVE_EMITTER(f) emit_native_table[mp_dynamic_compiler.native_arch]->emit_##f +#define NATIVE_EMITTER_TABLE emit_native_table[mp_dynamic_compiler.native_arch] + +STATIC const emit_method_table_t *emit_native_table[] = { + NULL, + &emit_native_x86_method_table, + &emit_native_x64_method_table, + &emit_native_arm_method_table, + &emit_native_thumb_method_table, + &emit_native_thumb_method_table, + &emit_native_thumb_method_table, + &emit_native_thumb_method_table, + &emit_native_thumb_method_table, + &emit_native_xtensa_method_table, +}; + +#elif MICROPY_EMIT_NATIVE // define a macro to access external native emitter #if MICROPY_EMIT_X64 #define NATIVE_EMITTER(f) emit_native_x64_##f @@ -93,9 +112,28 @@ typedef enum { #else #error "unknown native emitter" #endif +#define NATIVE_EMITTER_TABLE &NATIVE_EMITTER(method_table) #endif -#if MICROPY_EMIT_INLINE_ASM +#if MICROPY_EMIT_INLINE_ASM && MICROPY_DYNAMIC_COMPILER + +#define ASM_EMITTER(f) emit_asm_table[mp_dynamic_compiler.native_arch]->asm_##f +#define ASM_EMITTER_TABLE emit_asm_table[mp_dynamic_compiler.native_arch] + +STATIC const emit_inline_asm_method_table_t *emit_asm_table[] = { + NULL, + NULL, + NULL, + &emit_inline_thumb_method_table, + &emit_inline_thumb_method_table, + &emit_inline_thumb_method_table, + &emit_inline_thumb_method_table, + &emit_inline_thumb_method_table, + &emit_inline_thumb_method_table, + &emit_inline_xtensa_method_table, +}; + +#elif MICROPY_EMIT_INLINE_ASM // define macros for inline assembler #if MICROPY_EMIT_INLINE_THUMB #define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb @@ -106,6 +144,7 @@ typedef enum { #else #error "unknown asm emitter" #endif +#define ASM_EMITTER_TABLE &ASM_EMITTER(method_table) #endif #define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm)) @@ -164,13 +203,25 @@ STATIC void compile_syntax_error(compiler_t *comp, mp_parse_node_t pn, const cha STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra); STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind); +STATIC void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map); STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn); STATIC uint comp_next_label(compiler_t 
*comp) { return comp->next_label++; } -STATIC void compile_increase_except_level(compiler_t *comp) { +#if MICROPY_EMIT_NATIVE +STATIC void reserve_labels_for_native(compiler_t *comp, int n) { + if (comp->scope_cur->emit_options != MP_EMIT_OPT_BYTECODE) { + comp->next_label += n; + } +} +#else +#define reserve_labels_for_native(comp, n) +#endif + +STATIC void compile_increase_except_level(compiler_t *comp, uint label, int kind) { + EMIT_ARG(setup_block, label, kind); comp->cur_except_level += 1; if (comp->cur_except_level > comp->scope_cur->exc_stack_size) { comp->scope_cur->exc_stack_size = comp->cur_except_level; @@ -180,6 +231,8 @@ STATIC void compile_increase_except_level(compiler_t *comp) { STATIC void compile_decrease_except_level(compiler_t *comp) { assert(comp->cur_except_level > 0); comp->cur_except_level -= 1; + EMIT(end_finally); + reserve_labels_for_native(comp, 1); } STATIC scope_t *scope_new_and_link(compiler_t *comp, scope_kind_t kind, mp_parse_node_t pn, uint emit_options) { @@ -273,7 +326,7 @@ STATIC void c_tuple(compiler_t *comp, mp_parse_node_t pn, mp_parse_node_struct_t } total += n; } - EMIT_ARG(build_tuple, total); + EMIT_ARG(build, total, MP_EMIT_BUILD_TUPLE); } STATIC void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) { @@ -366,14 +419,14 @@ STATIC void c_assign_atom_expr(compiler_t *comp, mp_parse_node_struct_t *pns, as if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) { if (assign_kind == ASSIGN_AUG_STORE) { EMIT(rot_three); - EMIT(store_subscr); + EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE); } else { compile_node(comp, pns1->nodes[0]); if (assign_kind == ASSIGN_AUG_LOAD) { EMIT(dup_top_two); - EMIT(load_subscr); + EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD); } else { - EMIT(store_subscr); + EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE); } } return; @@ -381,12 +434,12 @@ STATIC void c_assign_atom_expr(compiler_t *comp, mp_parse_node_struct_t *pns, as assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0])); if (assign_kind == ASSIGN_AUG_LOAD) { EMIT(dup_top); - EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0])); + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_LOAD); } else { if (assign_kind == ASSIGN_AUG_STORE) { EMIT(rot_two); } - EMIT_ARG(store_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0])); + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_STORE); } return; } @@ -554,6 +607,11 @@ STATIC void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int } this_scope->num_def_pos_args = n_pos_defaults; + #if MICROPY_EMIT_NATIVE + // When creating a function/closure it will take a reference to the current globals + comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS; + #endif + // make closed over variables, if any // ensure they are closed over in the order defined in the outer scope (mainly to agree with CPython) int nfree = 0; @@ -652,12 +710,12 @@ STATIC void compile_funcdef_lambdef_param(compiler_t *comp, mp_parse_node_t pn) // in MicroPython we put the default positional parameters into a tuple using the bytecode // we need to do this here before we start building the map for the default keywords if (comp->num_default_params > 0) { - EMIT_ARG(build_tuple, comp->num_default_params); + EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE); } else { EMIT(load_null); // sentinel indicating empty default positional args } // first default dict param, so make the map - EMIT_ARG(build_map, 0); + EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP); } // compile value then key, then 
store it to the dict @@ -693,7 +751,7 @@ STATIC void compile_funcdef_lambdef(compiler_t *comp, scope_t *scope, mp_parse_n // in MicroPython we put the default positional parameters into a tuple using the bytecode // the default keywords args may have already made the tuple; if not, do it now if (comp->num_default_params > 0 && comp->num_dict_params == 0) { - EMIT_ARG(build_tuple, comp->num_default_params); + EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE); EMIT(load_null); // sentinel indicating empty default keyword args } @@ -780,13 +838,32 @@ STATIC bool compile_built_in_decorator(compiler_t *comp, int name_len, mp_parse_ *emit_options = MP_EMIT_OPT_VIPER; #endif #if MICROPY_EMIT_INLINE_ASM + #if MICROPY_DYNAMIC_COMPILER + } else if (attr == MP_QSTR_asm_thumb) { + *emit_options = MP_EMIT_OPT_ASM; + } else if (attr == MP_QSTR_asm_xtensa) { + *emit_options = MP_EMIT_OPT_ASM; + #else } else if (attr == ASM_DECORATOR_QSTR) { *emit_options = MP_EMIT_OPT_ASM; #endif + #endif } else { compile_syntax_error(comp, name_nodes[1], "invalid micropython decorator"); } + #if MICROPY_DYNAMIC_COMPILER + if (*emit_options == MP_EMIT_OPT_NATIVE_PYTHON || *emit_options == MP_EMIT_OPT_VIPER) { + if (emit_native_table[mp_dynamic_compiler.native_arch] == NULL) { + compile_syntax_error(comp, name_nodes[1], "invalid arch"); + } + } else if (*emit_options == MP_EMIT_OPT_ASM) { + if (emit_asm_table[mp_dynamic_compiler.native_arch] == NULL) { + compile_syntax_error(comp, name_nodes[1], "invalid arch"); + } + } + #endif + return true; } @@ -820,7 +897,7 @@ STATIC void compile_decorated(compiler_t *comp, mp_parse_node_struct_t *pns) { compile_node(comp, name_nodes[0]); for (int j = 1; j < name_len; j++) { assert(MP_PARSE_NODE_IS_ID(name_nodes[j])); // should be - EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(name_nodes[j])); + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(name_nodes[j]), MP_EMIT_ATTR_LOAD); } // nodes[1] contains arguments to the decorator function, if any @@ -884,10 +961,10 @@ STATIC void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) { } if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) { compile_node(comp, pns1->nodes[0]); - EMIT(delete_subscr); + EMIT_ARG(subscr, MP_EMIT_SUBSCR_DELETE); } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) { assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0])); - EMIT_ARG(delete_attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0])); + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_DELETE); } else { goto cannot_delete; } @@ -945,20 +1022,21 @@ STATIC void compile_del_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { apply_to_single_or_list(comp, pns->nodes[0], PN_exprlist, c_del_stmt); } -STATIC void compile_break_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { - if (comp->break_label == INVALID_LABEL) { - compile_syntax_error(comp, (mp_parse_node_t)pns, "'break' outside loop"); +STATIC void compile_break_cont_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { + uint16_t label; + const char *error_msg; + if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_break_stmt) { + label = comp->break_label; + error_msg = "'break' outside loop"; + } else { + label = comp->continue_label; + error_msg = "'continue' outside loop"; + } + if (label == INVALID_LABEL) { + compile_syntax_error(comp, (mp_parse_node_t)pns, error_msg); } assert(comp->cur_except_level >= comp->break_continue_except_level); - EMIT_ARG(break_loop, comp->break_label, comp->cur_except_level - comp->break_continue_except_level); -} - -STATIC void 
compile_continue_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { - if (comp->continue_label == INVALID_LABEL) { - compile_syntax_error(comp, (mp_parse_node_t)pns, "'continue' outside loop"); - } - assert(comp->cur_except_level >= comp->break_continue_except_level); - EMIT_ARG(continue_loop, comp->continue_label, comp->cur_except_level - comp->break_continue_except_level); + EMIT_ARG(unwind_jump, label, comp->cur_except_level - comp->break_continue_except_level); } STATIC void compile_return_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { @@ -1024,14 +1102,14 @@ STATIC void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) { if (MP_PARSE_NODE_IS_NULL(pn)) { // empty name (eg, from . import x) *q_base = MP_QSTR_; - EMIT_ARG(import_name, MP_QSTR_); // import the empty string + EMIT_ARG(import, MP_QSTR_, MP_EMIT_IMPORT_NAME); // import the empty string } else if (MP_PARSE_NODE_IS_ID(pn)) { // just a simple name qstr q_full = MP_PARSE_NODE_LEAF_ARG(pn); if (!is_as) { *q_base = q_full; } - EMIT_ARG(import_name, q_full); + EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME); } else { assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_name)); // should be mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn; @@ -1058,10 +1136,10 @@ STATIC void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) { } qstr q_full = qstr_from_strn(q_ptr, len); mp_local_free(q_ptr); - EMIT_ARG(import_name, q_full); + EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME); if (is_as) { for (int i = 1; i < n; i++) { - EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[i])); + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), MP_EMIT_ATTR_LOAD); } } } @@ -1122,12 +1200,12 @@ STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) { // build the "fromlist" tuple EMIT_ARG(load_const_str, MP_QSTR__star_); - EMIT_ARG(build_tuple, 1); + EMIT_ARG(build, 1, MP_EMIT_BUILD_TUPLE); // do the import qstr dummy_q; do_import_name(comp, pn_import_source, &dummy_q); - EMIT(import_star); + EMIT_ARG(import, MP_QSTR_NULL, MP_EMIT_IMPORT_STAR); } else { EMIT_ARG(load_const_small_int, import_level); @@ -1141,7 +1219,7 @@ STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) { qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id EMIT_ARG(load_const_str, id2); } - EMIT_ARG(build_tuple, n); + EMIT_ARG(build, n, MP_EMIT_BUILD_TUPLE); // do the import qstr dummy_q; @@ -1150,7 +1228,7 @@ STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) { assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name)); mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t*)pn_nodes[i]; qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id - EMIT_ARG(import_from, id2); + EMIT_ARG(import, id2, MP_EMIT_IMPORT_FROM); if (MP_PARSE_NODE_IS_NULL(pns3->nodes[1])) { compile_store_id(comp, id2); } else { @@ -1161,37 +1239,24 @@ STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) { } } -STATIC void compile_declare_global(compiler_t *comp, mp_parse_node_t pn, qstr qst) { - bool added; - id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, &added); - if (!added && id_info->kind != ID_INFO_KIND_GLOBAL_EXPLICIT) { +STATIC void compile_declare_global(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) { + if (id_info->kind != ID_INFO_KIND_UNDECIDED && id_info->kind != ID_INFO_KIND_GLOBAL_EXPLICIT) { compile_syntax_error(comp, pn, "identifier redefined as global"); return; } id_info->kind = 
ID_INFO_KIND_GLOBAL_EXPLICIT; // if the id exists in the global scope, set its kind to EXPLICIT_GLOBAL - id_info = scope_find_global(comp->scope_cur, qst); + id_info = scope_find_global(comp->scope_cur, id_info->qst); if (id_info != NULL) { id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT; } } -STATIC void compile_global_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { - if (comp->pass == MP_PASS_SCOPE) { - mp_parse_node_t *nodes; - int n = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &nodes); - for (int i = 0; i < n; i++) { - compile_declare_global(comp, (mp_parse_node_t)pns, MP_PARSE_NODE_LEAF_ARG(nodes[i])); - } - } -} - -STATIC void compile_declare_nonlocal(compiler_t *comp, mp_parse_node_t pn, qstr qst) { - bool added; - id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, &added); - if (added) { - scope_find_local_and_close_over(comp->scope_cur, id_info, qst); +STATIC void compile_declare_nonlocal(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) { + if (id_info->kind == ID_INFO_KIND_UNDECIDED) { + id_info->kind = ID_INFO_KIND_GLOBAL_IMPLICIT; + scope_check_to_close_over(comp->scope_cur, id_info); if (id_info->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { compile_syntax_error(comp, pn, "no binding for nonlocal found"); } @@ -1200,16 +1265,25 @@ } } -STATIC void compile_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { +STATIC void compile_global_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { if (comp->pass == MP_PASS_SCOPE) { - if (comp->scope_cur->kind == SCOPE_MODULE) { + bool is_global = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_global_stmt; + + if (!is_global && comp->scope_cur->kind == SCOPE_MODULE) { compile_syntax_error(comp, (mp_parse_node_t)pns, "can't declare nonlocal in outer code"); return; } + mp_parse_node_t *nodes; int n = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &nodes); for (int i = 0; i < n; i++) { - compile_declare_nonlocal(comp, (mp_parse_node_t)pns, MP_PARSE_NODE_LEAF_ARG(nodes[i])); + qstr qst = MP_PARSE_NODE_LEAF_ARG(nodes[i]); + id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, ID_INFO_KIND_UNDECIDED); + if (is_global) { + compile_declare_global(comp, (mp_parse_node_t)pns, id_info); + } else { + compile_declare_nonlocal(comp, (mp_parse_node_t)pns, id_info); + } } } } @@ -1516,12 +1590,10 @@ STATIC void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_ uint l1 = comp_next_label(comp); uint success_label = comp_next_label(comp); - EMIT_ARG(setup_except, l1); - compile_increase_except_level(comp); + compile_increase_except_level(comp, l1, MP_EMIT_SETUP_BLOCK_EXCEPT); compile_node(comp, pn_body); // body - EMIT(pop_block); - EMIT_ARG(jump, success_label); // jump over exception handler + EMIT_ARG(pop_except_jump, success_label, false); // jump over exception handler EMIT_ARG(label_assign, l1); // start of exception handler EMIT(start_except_handler); @@ -1568,34 +1640,36 @@ compile_store_id(comp, qstr_exception_local); } + // If the exception is bound to a variable <e> then the <body> of the + // exception handler is wrapped in a try-finally so that the name <e> can + // be deleted (per Python semantics) even if the <body> has an exception.
+ // In such a case the generated code for the exception handler is: + // try: + // <body> + // finally: + // <e> = None + // del <e> uint l3 = 0; if (qstr_exception_local != 0) { l3 = comp_next_label(comp); - EMIT_ARG(setup_finally, l3); - compile_increase_except_level(comp); + compile_increase_except_level(comp, l3, MP_EMIT_SETUP_BLOCK_FINALLY); } - compile_node(comp, pns_except->nodes[1]); - if (qstr_exception_local != 0) { - EMIT(pop_block); - } - EMIT(pop_except); + compile_node(comp, pns_except->nodes[1]); // the <body> if (qstr_exception_local != 0) { EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); EMIT_ARG(label_assign, l3); EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); compile_store_id(comp, qstr_exception_local); compile_delete_id(comp, qstr_exception_local); - compile_decrease_except_level(comp); - EMIT(end_finally); } + + EMIT_ARG(pop_except_jump, l2, true); EMIT_ARG(label_assign, end_finally_label); EMIT_ARG(adjust_stack_size, 1); // stack adjust for the exception instance } compile_decrease_except_level(comp); - EMIT(end_finally); EMIT(end_except_handler); EMIT_ARG(label_assign, success_label); @@ -1606,8 +1680,7 @@ STATIC void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) { uint l_finally_block = comp_next_label(comp); - EMIT_ARG(setup_finally, l_finally_block); - compile_increase_except_level(comp); + compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY); if (n_except == 0) { assert(MP_PARSE_NODE_IS_NULL(pn_else)); @@ -1617,13 +1690,11 @@ } else { compile_try_except(comp, pn_body, n_except, pn_except, pn_else); } - EMIT(pop_block); EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); EMIT_ARG(label_assign, l_finally_block); compile_node(comp, pn_finally); compile_decrease_except_level(comp); - EMIT(end_finally); } STATIC void compile_try_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { @@ -1659,30 +1730,24 @@ STATIC void compile_with_stmt_helper(compiler_t *comp, int n, mp_parse_node_t *n compile_node(comp, body); } else { uint l_end = comp_next_label(comp); - if (MICROPY_EMIT_NATIVE && comp->scope_cur->emit_options != MP_EMIT_OPT_BYTECODE) { - // we need to allocate an extra label for the native emitter - // it will use l_end+1 as an auxiliary label - comp_next_label(comp); - } if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) { // this pre-bit is of the form "a as b" mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)nodes[0]; compile_node(comp, pns->nodes[0]); - EMIT_ARG(setup_with, l_end); + compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH); c_assign(comp, pns->nodes[1], ASSIGN_STORE); } else { // this pre-bit is just an expression compile_node(comp, nodes[0]); - EMIT_ARG(setup_with, l_end); + compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH); EMIT(pop_top); } - compile_increase_except_level(comp); // compile additional pre-bits and the body compile_with_stmt_helper(comp, n - 1, nodes + 1, body); // finish this with block EMIT_ARG(with_cleanup, l_end); + reserve_labels_for_native(comp, 3); // used by native's with_cleanup compile_decrease_except_level(comp); - EMIT(end_finally); } } @@ -1699,7 +1764,8 @@ STATIC void compile_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { STATIC void compile_yield_from(compiler_t *comp) {
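// Aside: reserve_labels_for_native() (defined earlier in this hunk) burns
// extra entries from the compiler's shared label counter whenever the scope
// is compiled for a native emitter, because the native emitters synthesise
// internal jump targets from the same numbering (with_cleanup and yield_from
// reserve 3, a plain yield reserves 1). A minimal stand-alone sketch of that
// bookkeeping; the toy_* names and is_native flag are illustrative only, not
// part of this patch:

#include <stdio.h>

typedef struct { unsigned next_label; int is_native; } toy_comp_t;

static unsigned toy_next_label(toy_comp_t *c) { return c->next_label++; }

static void toy_reserve_labels_for_native(toy_comp_t *c, int n) {
    if (c->is_native) {
        c->next_label += n; // skip slots the native emitter will claim internally
    }
}

int main(void) {
    toy_comp_t c = { 0, 1 };
    unsigned l_end = toy_next_label(&c);  // label the construct itself jumps to
    toy_reserve_labels_for_native(&c, 3); // keep l_end+1..l_end+3 free, as with_cleanup needs
    printf("l_end=%u, next free label=%u\n", l_end, c.next_label); // prints 0 and 4
    return 0;
}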
EMIT_ARG(get_iter, false); EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); - EMIT(yield_from); + EMIT_ARG(yield, MP_EMIT_YIELD_FROM); + reserve_labels_for_native(comp, 3); } #if MICROPY_PY_ASYNC_AWAIT @@ -1726,14 +1792,12 @@ STATIC void compile_async_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns EMIT_ARG(label_assign, continue_label); - EMIT_ARG(setup_except, try_exception_label); - compile_increase_except_level(comp); + compile_increase_except_level(comp, try_exception_label, MP_EMIT_SETUP_BLOCK_EXCEPT); compile_load_id(comp, context); compile_await_object_method(comp, MP_QSTR___anext__); c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable - EMIT(pop_block); - EMIT_ARG(jump, try_else_label); + EMIT_ARG(pop_except_jump, try_else_label, false); EMIT_ARG(label_assign, try_exception_label); EMIT(start_except_handler); @@ -1742,13 +1806,11 @@ STATIC void compile_async_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH); EMIT_ARG(pop_jump_if, false, try_finally_label); EMIT(pop_top); // pop exception instance - EMIT(pop_except); - EMIT_ARG(jump, while_else_label); + EMIT_ARG(pop_except_jump, while_else_label, true); EMIT_ARG(label_assign, try_finally_label); EMIT_ARG(adjust_stack_size, 1); // if we jump here, the exc is on the stack compile_decrease_except_level(comp); - EMIT(end_finally); EMIT(end_except_handler); EMIT_ARG(label_assign, try_else_label); @@ -1769,49 +1831,72 @@ STATIC void compile_async_with_stmt_helper(compiler_t *comp, int n, mp_parse_nod // no more pre-bits, compile the body of the with compile_node(comp, body); } else { - uint try_exception_label = comp_next_label(comp); - uint no_reraise_label = comp_next_label(comp); - uint try_else_label = comp_next_label(comp); - uint end_label = comp_next_label(comp); - qstr context; + uint l_finally_block = comp_next_label(comp); + uint l_aexit_no_exc = comp_next_label(comp); + uint l_ret_unwind_jump = comp_next_label(comp); + uint l_end = comp_next_label(comp); if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) { // this pre-bit is of the form "a as b" mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)nodes[0]; compile_node(comp, pns->nodes[0]); - context = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); - compile_store_id(comp, context); - compile_load_id(comp, context); + EMIT(dup_top); compile_await_object_method(comp, MP_QSTR___aenter__); c_assign(comp, pns->nodes[1], ASSIGN_STORE); } else { // this pre-bit is just an expression compile_node(comp, nodes[0]); - context = MP_PARSE_NODE_LEAF_ARG(nodes[0]); - compile_store_id(comp, context); - compile_load_id(comp, context); + EMIT(dup_top); compile_await_object_method(comp, MP_QSTR___aenter__); EMIT(pop_top); } - compile_load_id(comp, context); - EMIT_ARG(load_method, MP_QSTR___aexit__, false); + // To keep the Python stack size down, and because we can't access values on + // this stack further down than 3 elements (via rot_three), we don't preload + // __aexit__ (as per normal with) but rather wait until we need it below. 
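// Aside: the dup/rot sequence emitted for "case 2" below is easiest to check
// by simulating the Python value stack. The following stand-alone sketch is
// not part of the patch: push/dup_top/rot_two/rot_three mirror the bytecodes,
// load_method is modelled as replacing TOS with the bound method plus self
// (two slots, as in the VM), and load_attr___class__ models fetching
// type(exc). Running it prints the layout noted in the comment further down:
// (..., exc, __aexit__, ctx_mgr, type(exc), exc, None).

#include <stdio.h>

static const char *stk[16];
static int sp;

static void push(const char *v) { stk[sp++] = v; }
static void dup_top(void) { stk[sp] = stk[sp - 1]; sp++; }
static void rot_two(void) { // swap the top two items
    const char *t = stk[sp - 1]; stk[sp - 1] = stk[sp - 2]; stk[sp - 2] = t;
}
static void rot_three(void) { // move TOS down two places, lift the others up
    const char *t = stk[sp - 1];
    stk[sp - 1] = stk[sp - 2]; stk[sp - 2] = stk[sp - 3]; stk[sp - 3] = t;
}
static void load_method(const char *name) { // obj -> method, self
    const char *obj = stk[--sp]; push(name); push(obj);
}
static void load_attr___class__(void) { // exc -> type(exc)
    stk[sp - 1] = "type(exc)";
}

int main(void) {
    push("ctx_mgr"); push("exc");      // case 2 entry state: (..., ctx_mgr, exc)
    dup_top(); rot_three(); rot_two(); // (..., exc, exc, ctx_mgr)
    load_method("__aexit__");          // (..., exc, exc, __aexit__, ctx_mgr)
    rot_three(); rot_three();          // (..., exc, __aexit__, ctx_mgr, exc)
    dup_top(); load_attr___class__();  // (..., exc, __aexit__, ctx_mgr, exc, type(exc))
    rot_two(); push("None");           // dummy traceback completes the call args
    for (int i = 0; i < sp; ++i) {
        printf("%s%s", stk[i], i + 1 == sp ? "\n" : " ");
    }
    return 0; // prints: exc __aexit__ ctx_mgr type(exc) exc None
}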
- EMIT_ARG(setup_except, try_exception_label); - compile_increase_except_level(comp); - // compile additional pre-bits and the body + // Start the try-finally statement + compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY); + + // Compile any additional pre-bits of the "async with", and also the body + EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state compile_async_with_stmt_helper(comp, n - 1, nodes + 1, body); - // finish this with block - EMIT(pop_block); - EMIT_ARG(jump, try_else_label); // jump over exception handler + EMIT_ARG(adjust_stack_size, -3); - EMIT_ARG(label_assign, try_exception_label); // start of exception handler - EMIT(start_except_handler); + // We have now finished the "try" block and fall through to the "finally" - // at this point the stack contains: ..., __aexit__, self, exc + // At this point, after the with body has executed, we have 3 cases: + // 1. no exception, we just fall through to this point; stack: (..., ctx_mgr) + // 2. exception propagating out, we get to the finally block; stack: (..., ctx_mgr, exc) + // 3. return or unwind jump, we get to the finally block; stack: (..., ctx_mgr, X, INT) + + // Handle case 1: call __aexit__ + // Stack: (..., ctx_mgr) + EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // to tell end_finally there's no exception + EMIT(rot_two); + EMIT_ARG(jump, l_aexit_no_exc); // jump to code below to call __aexit__ + + // Start of "finally" block + // At this point we have case 2 or 3, we detect which one by the TOS being an exception or not + EMIT_ARG(label_assign, l_finally_block); + + // Detect if TOS an exception or not + EMIT(dup_top); + EMIT_LOAD_GLOBAL(MP_QSTR_BaseException); + EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH); + EMIT_ARG(pop_jump_if, false, l_ret_unwind_jump); // if not an exception then we have case 3 + + // Handle case 2: call __aexit__ and either swallow or re-raise the exception + // Stack: (..., ctx_mgr, exc) + EMIT(dup_top); + EMIT(rot_three); + EMIT(rot_two); + EMIT_ARG(load_method, MP_QSTR___aexit__, false); + EMIT(rot_three); + EMIT(rot_three); EMIT(dup_top); #if MICROPY_CPYTHON_COMPAT - EMIT_ARG(load_attr, MP_QSTR___class__); // get type(exc) + EMIT_ARG(attr, MP_QSTR___class__, MP_EMIT_ATTR_LOAD); // get type(exc) #else compile_load_id(comp, MP_QSTR_type); EMIT(rot_two); @@ -1819,32 +1904,37 @@ STATIC void compile_async_with_stmt_helper(compiler_t *comp, int n, mp_parse_nod #endif EMIT(rot_two); EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // dummy traceback value - // at this point the stack contains: ..., __aexit__, self, type(exc), exc, None + // Stack: (..., exc, __aexit__, ctx_mgr, type(exc), exc, None) EMIT_ARG(call_method, 3, 0, 0); - compile_yield_from(comp); - EMIT_ARG(pop_jump_if, true, no_reraise_label); - EMIT_ARG(raise_varargs, 0); + EMIT_ARG(pop_jump_if, false, l_end); + EMIT(pop_top); // pop exception + EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // replace with None to swallow exception + EMIT_ARG(jump, l_end); + EMIT_ARG(adjust_stack_size, 2); - EMIT_ARG(label_assign, no_reraise_label); - EMIT(pop_except); - EMIT_ARG(jump, end_label); - - EMIT_ARG(adjust_stack_size, 3); // adjust for __aexit__, self, exc - compile_decrease_except_level(comp); - EMIT(end_finally); - EMIT(end_except_handler); - - EMIT_ARG(label_assign, try_else_label); // start of try-else handler + // Handle case 3: call __aexit__ + // Stack: (..., ctx_mgr, X, INT) + EMIT_ARG(label_assign, l_ret_unwind_jump); + EMIT(rot_three); + EMIT(rot_three); + 
EMIT_ARG(label_assign, l_aexit_no_exc); + EMIT_ARG(load_method, MP_QSTR___aexit__, false); EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); EMIT(dup_top); EMIT(dup_top); EMIT_ARG(call_method, 3, 0, 0); compile_yield_from(comp); EMIT(pop_top); + EMIT_ARG(adjust_stack_size, -1); - EMIT_ARG(label_assign, end_label); - + // End of "finally" block + // Stack can have one of three configurations: + // a. (..., None) - from either case 1, or case 2 with swallowed exception + // b. (..., exc) - from case 2 with re-raised exception + // c. (..., X, INT) - from case 3 + EMIT_ARG(label_assign, l_end); + compile_decrease_except_level(comp); } } @@ -1988,15 +2078,6 @@ STATIC void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) { } } -STATIC void c_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns, mp_binary_op_t binary_op) { - int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns); - compile_node(comp, pns->nodes[0]); - for (int i = 1; i < num_nodes; i += 1) { - compile_node(comp, pns->nodes[i]); - EMIT_ARG(binary_op, binary_op); - } -} - STATIC void compile_test_if_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_test_if_else)); mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t*)pns->nodes[1]; @@ -2027,7 +2108,8 @@ STATIC void compile_lambdef(compiler_t *comp, mp_parse_node_struct_t *pns) { compile_funcdef_lambdef(comp, this_scope, pns->nodes[0], PN_varargslist); } -STATIC void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns, bool cond) { +STATIC void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns) { + bool cond = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test; uint l_end = comp_next_label(comp); int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns); for (int i = 0; i < n; i += 1) { @@ -2039,14 +2121,6 @@ STATIC void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns, b EMIT_ARG(label_assign, l_end); } -STATIC void compile_or_test(compiler_t *comp, mp_parse_node_struct_t *pns) { - compile_or_and_test(comp, pns, true); -} - -STATIC void compile_and_test(compiler_t *comp, mp_parse_node_struct_t *pns) { - compile_or_and_test(comp, pns, false); -} - STATIC void compile_not_test_2(compiler_t *comp, mp_parse_node_struct_t *pns) { compile_node(comp, pns->nodes[0]); EMIT_ARG(unary_op, MP_UNARY_OP_NOT); @@ -2112,16 +2186,16 @@ STATIC void compile_star_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { compile_syntax_error(comp, (mp_parse_node_t)pns, "*x must be assignment target"); } -STATIC void compile_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { - c_binary_op(comp, pns, MP_BINARY_OP_OR); -} - -STATIC void compile_xor_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { - c_binary_op(comp, pns, MP_BINARY_OP_XOR); -} - -STATIC void compile_and_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { - c_binary_op(comp, pns, MP_BINARY_OP_AND); +STATIC void compile_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns) { + MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_xor_expr - PN_expr == MP_BINARY_OP_XOR); + MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_and_expr - PN_expr == MP_BINARY_OP_AND); + mp_binary_op_t binary_op = MP_BINARY_OP_OR + MP_PARSE_NODE_STRUCT_KIND(pns) - PN_expr; + int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns); + compile_node(comp, pns->nodes[0]); + for (int i = 1; i < num_nodes; ++i) { + compile_node(comp, pns->nodes[i]); + EMIT_ARG(binary_op, binary_op); + } } STATIC void compile_term(compiler_t *comp, mp_parse_node_struct_t *pns) { @@ -2225,6 +2299,20 @@ 
STATIC void compile_atom_expr_normal(compiler_t *comp, mp_parse_node_struct_t *p EMIT_ARG(call_function, 2, 0, 0); i = 1; } + + #if MICROPY_COMP_CONST_LITERAL && MICROPY_PY_COLLECTIONS_ORDEREDDICT + // handle special OrderedDict constructor + } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0]) + && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_OrderedDict + && MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_trailer_paren + && MP_PARSE_NODE_IS_STRUCT_KIND(pns_trail[0]->nodes[0], PN_atom_brace)) { + // at this point we have matched "OrderedDict({...})" + + EMIT_ARG(call_function, 0, 0, 0); + mp_parse_node_struct_t *pns_dict = (mp_parse_node_struct_t*)pns_trail[0]->nodes[0]; + compile_atom_brace_helper(comp, pns_dict, false); + i = 1; + #endif } // compile the remaining trailers @@ -2397,7 +2485,7 @@ STATIC void compile_atom_paren(compiler_t *comp, mp_parse_node_struct_t *pns) { STATIC void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) { if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) { // empty list - EMIT_ARG(build_list, 0); + EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST); } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) { mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t*)pns->nodes[0]; if (MP_PARSE_NODE_IS_STRUCT(pns2->nodes[1])) { @@ -2406,12 +2494,12 @@ STATIC void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) // list of one item, with trailing comma assert(MP_PARSE_NODE_IS_NULL(pns3->nodes[0])); compile_node(comp, pns2->nodes[0]); - EMIT_ARG(build_list, 1); + EMIT_ARG(build, 1, MP_EMIT_BUILD_LIST); } else if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_testlist_comp_3c) { // list of many items compile_node(comp, pns2->nodes[0]); compile_generic_all_nodes(comp, pns3); - EMIT_ARG(build_list, 1 + MP_PARSE_NODE_STRUCT_NUM_NODES(pns3)); + EMIT_ARG(build, 1 + MP_PARSE_NODE_STRUCT_NUM_NODES(pns3), MP_EMIT_BUILD_LIST); } else if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_comp_for) { // list comprehension compile_comprehension(comp, pns2, SCOPE_LIST_COMP); @@ -2424,25 +2512,29 @@ STATIC void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) list_with_2_items: compile_node(comp, pns2->nodes[0]); compile_node(comp, pns2->nodes[1]); - EMIT_ARG(build_list, 2); + EMIT_ARG(build, 2, MP_EMIT_BUILD_LIST); } } else { // list with 1 item compile_node(comp, pns->nodes[0]); - EMIT_ARG(build_list, 1); + EMIT_ARG(build, 1, MP_EMIT_BUILD_LIST); } } -STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) { +STATIC void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map) { mp_parse_node_t pn = pns->nodes[0]; if (MP_PARSE_NODE_IS_NULL(pn)) { // empty dict - EMIT_ARG(build_map, 0); + if (create_map) { + EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP); + } } else if (MP_PARSE_NODE_IS_STRUCT(pn)) { pns = (mp_parse_node_struct_t*)pn; if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker_item) { // dict with one element - EMIT_ARG(build_map, 1); + if (create_map) { + EMIT_ARG(build, 1, MP_EMIT_BUILD_MAP); + } compile_node(comp, pn); EMIT(store_map); } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) { @@ -2459,7 +2551,9 @@ STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) { bool is_dict; if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) { // a dictionary - EMIT_ARG(build_map, 1 + n); + if (create_map) { + EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_MAP); + } compile_node(comp, pns->nodes[0]); EMIT(store_map); is_dict = 
true; @@ -2499,7 +2593,7 @@ STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) { #if MICROPY_PY_BUILTINS_SET // if it's a set, build it if (!is_dict) { - EMIT_ARG(build_set, 1 + n); + EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_SET); } #endif } else { @@ -2522,13 +2616,17 @@ STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) { set_with_one_element: #if MICROPY_PY_BUILTINS_SET compile_node(comp, pn); - EMIT_ARG(build_set, 1); + EMIT_ARG(build, 1, MP_EMIT_BUILD_SET); #else assert(0); #endif } } +STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) { + compile_atom_brace_helper(comp, pns, true); +} + STATIC void compile_trailer_paren(compiler_t *comp, mp_parse_node_struct_t *pns) { compile_trailer_paren_helper(comp, pns->nodes[0], false, 0); } @@ -2536,22 +2634,31 @@ STATIC void compile_trailer_paren(compiler_t *comp, mp_parse_node_struct_t *pns) STATIC void compile_trailer_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) { // object who's index we want is on top of stack compile_node(comp, pns->nodes[0]); // the index - EMIT(load_subscr); + EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD); } STATIC void compile_trailer_period(compiler_t *comp, mp_parse_node_struct_t *pns) { // object who's attribute we want is on top of stack - EMIT_ARG(load_attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0])); // attribute to get + EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]), MP_EMIT_ATTR_LOAD); // attribute to get } #if MICROPY_PY_BUILTINS_SLICE -STATIC void compile_subscript_3_helper(compiler_t *comp, mp_parse_node_struct_t *pns) { +STATIC void compile_subscript(compiler_t *comp, mp_parse_node_struct_t *pns) { + if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_2) { + compile_node(comp, pns->nodes[0]); // start of slice + assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be + pns = (mp_parse_node_struct_t*)pns->nodes[1]; + } else { + // pns is a PN_subscript_3, load None for start of slice + EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); + } + assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3); // should always be mp_parse_node_t pn = pns->nodes[0]; if (MP_PARSE_NODE_IS_NULL(pn)) { // [?:] EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); - EMIT_ARG(build_slice, 2); + EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE); } else if (MP_PARSE_NODE_IS_STRUCT(pn)) { pns = (mp_parse_node_struct_t*)pn; if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3c) { @@ -2559,11 +2666,11 @@ STATIC void compile_subscript_3_helper(compiler_t *comp, mp_parse_node_struct_t pn = pns->nodes[0]; if (MP_PARSE_NODE_IS_NULL(pn)) { // [?::] - EMIT_ARG(build_slice, 2); + EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE); } else { // [?::x] compile_node(comp, pn); - EMIT_ARG(build_slice, 3); + EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE); } } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3d) { compile_node(comp, pns->nodes[0]); @@ -2572,34 +2679,23 @@ STATIC void compile_subscript_3_helper(compiler_t *comp, mp_parse_node_struct_t assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_sliceop); // should always be if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) { // [?:x:] - EMIT_ARG(build_slice, 2); + EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE); } else { // [?:x:x] compile_node(comp, pns->nodes[0]); - EMIT_ARG(build_slice, 3); + EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE); } } else { // [?:x] compile_node(comp, pn); - EMIT_ARG(build_slice, 2); + EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE); } } else { // [?:x] compile_node(comp, pn); - EMIT_ARG(build_slice, 2); + EMIT_ARG(build, 2, 
MP_EMIT_BUILD_SLICE); } } - -STATIC void compile_subscript_2(compiler_t *comp, mp_parse_node_struct_t *pns) { - compile_node(comp, pns->nodes[0]); // start of slice - assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be - compile_subscript_3_helper(comp, (mp_parse_node_struct_t*)pns->nodes[1]); -} - -STATIC void compile_subscript_3(compiler_t *comp, mp_parse_node_struct_t *pns) { - EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); - compile_subscript_3_helper(comp, pns); -} #endif // MICROPY_PY_BUILTINS_SLICE STATIC void compile_dictorsetmaker_item(compiler_t *comp, mp_parse_node_struct_t *pns) { @@ -2621,14 +2717,16 @@ STATIC void compile_yield_expr(compiler_t *comp, mp_parse_node_struct_t *pns) { } if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) { EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); - EMIT(yield_value); + EMIT_ARG(yield, MP_EMIT_YIELD_VALUE); + reserve_labels_for_native(comp, 1); } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_yield_arg_from)) { pns = (mp_parse_node_struct_t*)pns->nodes[0]; compile_node(comp, pns->nodes[0]); compile_yield_from(comp); } else { compile_node(comp, pns->nodes[0]); - EMIT(yield_value); + EMIT_ARG(yield, MP_EMIT_YIELD_VALUE); + reserve_labels_for_native(comp, 1); } } @@ -2675,7 +2773,7 @@ STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn) { } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) { mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn); #if MICROPY_DYNAMIC_COMPILER - mp_uint_t sign_mask = -(1 << (mp_dynamic_compiler.small_int_bits - 1)); + mp_uint_t sign_mask = -((mp_uint_t)1 << (mp_dynamic_compiler.small_int_bits - 1)); if ((arg & sign_mask) == 0 || (arg & sign_mask) == sign_mask) { // integer fits in target runtime's small-int EMIT_ARG(load_const_small_int, arg); @@ -2725,7 +2823,28 @@ STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn) { } } +#if MICROPY_EMIT_NATIVE +STATIC int compile_viper_type_annotation(compiler_t *comp, mp_parse_node_t pn_annotation) { + int native_type = MP_NATIVE_TYPE_OBJ; + if (MP_PARSE_NODE_IS_NULL(pn_annotation)) { + // No annotation, type defaults to object + } else if (MP_PARSE_NODE_IS_ID(pn_annotation)) { + qstr type_name = MP_PARSE_NODE_LEAF_ARG(pn_annotation); + native_type = mp_native_type_from_qstr(type_name); + if (native_type < 0) { + comp->compile_error = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, "unknown type '%q'", type_name); + native_type = 0; + } + } else { + compile_syntax_error(comp, pn_annotation, "annotation must be an identifier"); + } + return native_type; +} +#endif + STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star) { + (void)pn_dbl_star; + // check that **kw is last if ((comp->scope_cur->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) { compile_syntax_error(comp, pn, "invalid syntax"); @@ -2734,6 +2853,7 @@ STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn qstr param_name = MP_QSTR_NULL; uint param_flag = ID_FLAG_IS_PARAM; + mp_parse_node_struct_t *pns = NULL; if (MP_PARSE_NODE_IS_ID(pn)) { param_name = MP_PARSE_NODE_LEAF_ARG(pn); if (comp->have_star) { @@ -2745,8 +2865,9 @@ STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn } } else { assert(MP_PARSE_NODE_IS_STRUCT(pn)); - mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn; + pns = (mp_parse_node_struct_t*)pn; if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_name) { + // named parameter with possible annotation param_name = 
MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); if (comp->have_star) { // comes after a star, so counts as a keyword-only parameter @@ -2767,10 +2888,12 @@ STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn // bare star // TODO see http://www.python.org/dev/peps/pep-3102/ //assert(comp->scope_cur->num_dict_params == 0); + pns = NULL; } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) { // named star comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS; param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); + pns = NULL; } else { assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)); // should be // named star with possible annotation @@ -2779,6 +2902,7 @@ STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); } } else { + // double star with possible annotation assert(MP_PARSE_NODE_STRUCT_KIND(pns) == pn_dbl_star); // should be param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_DBL_STAR_PARAM; @@ -2787,14 +2911,21 @@ STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn } if (param_name != MP_QSTR_NULL) { - bool added; - id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, &added); - if (!added) { - compile_syntax_error(comp, pn, "name reused for argument"); + id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, ID_INFO_KIND_UNDECIDED); + if (id_info->kind != ID_INFO_KIND_UNDECIDED) { + compile_syntax_error(comp, pn, "argument name reused"); return; } id_info->kind = ID_INFO_KIND_LOCAL; id_info->flags = param_flag; + + #if MICROPY_EMIT_NATIVE + if (comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER && pn_name == PN_typedargslist_name && pns != NULL) { + id_info->flags |= compile_viper_type_annotation(comp, pns->nodes[1]) << ID_FLAG_VIPER_TYPE_POS; + } + #else + (void)pns; + #endif } } @@ -2806,49 +2937,6 @@ STATIC void compile_scope_lambda_param(compiler_t *comp, mp_parse_node_t pn) { compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star); } -#if MICROPY_EMIT_NATIVE -STATIC void compile_scope_func_annotations(compiler_t *comp, mp_parse_node_t pn) { - if (!MP_PARSE_NODE_IS_STRUCT(pn)) { - // no annotation - return; - } - - mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn; - if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_name) { - // named parameter with possible annotation - // fallthrough - } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_star) { - if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)) { - // named star with possible annotation - pns = (mp_parse_node_struct_t*)pns->nodes[0]; - // fallthrough - } else { - // no annotation - return; - } - } else { - assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_typedargslist_dbl_star); - // double star with possible annotation - // fallthrough - } - - mp_parse_node_t pn_annotation = pns->nodes[1]; - - if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) { - qstr param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); - id_info_t *id_info = scope_find(comp->scope_cur, param_name); - assert(id_info != NULL); - - if (MP_PARSE_NODE_IS_ID(pn_annotation)) { - qstr arg_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation); - EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ARG, id_info->local_num, arg_type); - } else { - compile_syntax_error(comp, pn_annotation, "parameter annotation must be an identifier"); - } - } -} -#endif // MICROPY_EMIT_NATIVE - STATIC void 
compile_scope_comp_iter(compiler_t *comp, mp_parse_node_struct_t *pns_comp_for, mp_parse_node_t pn_inner_expr, int for_depth) { uint l_top = comp_next_label(comp); uint l_end = comp_next_label(comp); @@ -2862,7 +2950,8 @@ STATIC void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_struct_t *pn // no more nested if/for; compile inner expression compile_node(comp, pn_inner_expr); if (comp->scope_cur->kind == SCOPE_GEN_EXPR) { - EMIT(yield_value); + EMIT_ARG(yield, MP_EMIT_YIELD_VALUE); + reserve_labels_for_native(comp, 1); EMIT(pop_top); } else { EMIT_ARG(store_comp, comp->scope_cur->kind, 4 * for_depth + 5); @@ -2919,7 +3008,7 @@ STATIC void check_for_doc_string(compiler_t *comp, mp_parse_node_t pn) { if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0]) && MP_PARSE_NODE_LEAF_KIND(pns->nodes[0]) == MP_PARSE_NODE_STRING) || (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object) - && MP_OBJ_IS_STR(get_const_object((mp_parse_node_struct_t*)pns->nodes[0])))) { + && mp_obj_is_str(get_const_object((mp_parse_node_struct_t*)pns->nodes[0])))) { // compile the doc string compile_node(comp, pns->nodes[0]); // store the doc string @@ -2937,6 +3026,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { comp->scope_cur = scope; comp->next_label = 0; EMIT_ARG(start_pass, pass, scope); + reserve_labels_for_native(comp, 6); // used by native's start_pass if (comp->pass == MP_PASS_SCOPE) { // reset maximum stack sizes in scope @@ -2968,28 +3058,14 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { if (comp->pass == MP_PASS_SCOPE) { comp->have_star = false; apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param); - } - #if MICROPY_EMIT_NATIVE - else if (scope->emit_options == MP_EMIT_OPT_VIPER) { - // compile annotations; only needed on latter compiler passes - // only needed for viper emitter - // argument annotations - apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_annotations); - - // pns->nodes[2] is return/whole function annotation - mp_parse_node_t pn_annotation = pns->nodes[2]; - if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) { - // nodes[2] can be null or a test-expr - if (MP_PARSE_NODE_IS_ID(pn_annotation)) { - qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation); - EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_RETURN, 0, ret_type); - } else { - compile_syntax_error(comp, pn_annotation, "return annotation must be an identifier"); - } + #if MICROPY_EMIT_NATIVE + if (scope->emit_options == MP_EMIT_OPT_VIPER) { + // Compile return type; pns->nodes[2] is return/whole function annotation + scope->scope_flags |= compile_viper_type_annotation(comp, pns->nodes[2]) << MP_SCOPE_FLAG_VIPERRET_POS; } + #endif // MICROPY_EMIT_NATIVE } - #endif // MICROPY_EMIT_NATIVE compile_node(comp, pns->nodes[3]); // 3 is function body // emit return if it wasn't the last opcode @@ -3032,27 +3108,24 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { // so we use the blank qstr. 
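// Reader's note on the blank-qstr trick used just below: a comprehension is
// compiled as a hidden function whose single positional parameter (the
// iterable) is named with the empty qstr MP_QSTR_ so no real name is spent
// on it. Roughly, [x * x for x in it] behaves like this hypothetical
// expansion (illustrative only, not literal output of the compiler):
//
//     def <listcomp>(_arg):       # _arg is really the blank name ""
//         _res = []               # EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST)
//         for x in _arg:
//             _res.append(x * x)  # appended via the emitter's store_comp hook
//         return _res
//
// which is why the scope below only ever needs that one parameter slot.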
qstr qstr_arg = MP_QSTR_; if (comp->pass == MP_PASS_SCOPE) { - bool added; - id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qstr_arg, &added); - assert(added); - id_info->kind = ID_INFO_KIND_LOCAL; + scope_find_or_add_id(comp->scope_cur, qstr_arg, ID_INFO_KIND_LOCAL); scope->num_pos_args = 1; } if (scope->kind == SCOPE_LIST_COMP) { - EMIT_ARG(build_list, 0); + EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST); } else if (scope->kind == SCOPE_DICT_COMP) { - EMIT_ARG(build_map, 0); + EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP); #if MICROPY_PY_BUILTINS_SET } else if (scope->kind == SCOPE_SET_COMP) { - EMIT_ARG(build_set, 0); + EMIT_ARG(build, 0, MP_EMIT_BUILD_SET); #endif } // There are 4 slots on the stack for the iterator, and the first one is // NULL to indicate that the second one points to the iterator object. if (scope->kind == SCOPE_GEN_EXPR) { - // TODO static assert that MP_OBJ_ITER_BUF_NSLOTS == 4 + MP_STATIC_ASSERT(MP_OBJ_ITER_BUF_NSLOTS == 4); EMIT(load_null); compile_load_id(comp, qstr_arg); EMIT(load_null); @@ -3075,10 +3148,7 @@ STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) { assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_classdef); if (comp->pass == MP_PASS_SCOPE) { - bool added; - id_info_t *id_info = scope_find_or_add_id(scope, MP_QSTR___class__, &added); - assert(added); - id_info->kind = ID_INFO_KIND_LOCAL; + scope_find_or_add_id(scope, MP_QSTR___class__, ID_INFO_KIND_LOCAL); } compile_load_id(comp, MP_QSTR___name__); @@ -3254,7 +3324,11 @@ STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind void *f = mp_asm_base_get_code((mp_asm_base_t*)comp->emit_inline_asm); mp_emit_glue_assign_native(comp->scope_cur->raw_code, MP_CODE_NATIVE_ASM, f, mp_asm_base_get_code_size((mp_asm_base_t*)comp->emit_inline_asm), - NULL, comp->scope_cur->num_pos_args, 0, type_sig); + NULL, + #if MICROPY_PERSISTENT_CODE_SAVE + 0, 0, 0, 0, NULL, + #endif + comp->scope_cur->num_pos_args, 0, type_sig); } } @@ -3296,6 +3370,17 @@ STATIC void scope_compute_things(scope_t *scope) { if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { id->kind = ID_INFO_KIND_GLOBAL_EXPLICIT; } + #if MICROPY_EMIT_NATIVE + if (id->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) { + // This function makes a reference to a global variable + if (scope->emit_options == MP_EMIT_OPT_VIPER + && mp_native_type_from_qstr(id->qst) >= MP_NATIVE_TYPE_INT) { + // A casting operator in viper mode, not a real global reference + } else { + scope->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS; + } + } + #endif // params always count for 1 local, even if they are a cell if (id->kind == ID_INFO_KIND_LOCAL || (id->flags & ID_FLAG_IS_PARAM)) { id->local_num = scope->num_locals++; @@ -3372,13 +3457,21 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f #endif uint max_num_labels = 0; for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { - if (false) { #if MICROPY_EMIT_INLINE_ASM - } else if (s->emit_options == MP_EMIT_OPT_ASM) { + if (s->emit_options == MP_EMIT_OPT_ASM) { compile_scope_inline_asm(comp, s, MP_PASS_SCOPE); + } else #endif - } else { + { compile_scope(comp, s, MP_PASS_SCOPE); + + // Check if any implicitly declared variables should be closed over + for (size_t i = 0; i < s->id_info_len; ++i) { + id_info_t *id = &s->id_info[i]; + if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { + scope_check_to_close_over(s, id); + } + } } // update maximim number of labels needed @@ -3400,30 +3493,32 @@ 
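The extra loop added above is easy to miss but important: a name that is only
read is first classified ID_INFO_KIND_GLOBAL_IMPLICIT, and it is this pass over
s->id_info (via scope_check_to_close_over) that promotes such names to
closed-over variables when an enclosing function actually defines them. A
hypothetical input showing the case it handles:

    def outer():
        x = 1           # local of outer; becomes a cell once inner uses it
        def inner():
            return x    # first recorded as GLOBAL_IMPLICIT, then closed over
        return inner

Previously this promotion happened eagerly inside
mp_emit_common_get_id_for_load; deferring it until after the scope pass is what
lets that helper shrink to a one-line scope_find_or_add_id call in the emit.h
hunk further below.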
mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f emit_t *emit_native = NULL; #endif for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) { - if (false) { - // dummy - #if MICROPY_EMIT_INLINE_ASM - } else if (s->emit_options == MP_EMIT_OPT_ASM) { + if (s->emit_options == MP_EMIT_OPT_ASM) { // inline assembly if (comp->emit_inline_asm == NULL) { comp->emit_inline_asm = ASM_EMITTER(new)(max_num_labels); } comp->emit = NULL; - comp->emit_inline_asm_method_table = &ASM_EMITTER(method_table); + comp->emit_inline_asm_method_table = ASM_EMITTER_TABLE; compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); #if MICROPY_EMIT_INLINE_XTENSA // Xtensa requires an extra pass to compute size of l32r const table // TODO this can be improved by calculating it during SCOPE pass // but that requires some other structural changes to the asm emitters - compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + #if MICROPY_DYNAMIC_COMPILER + if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_XTENSA) + #endif + { + compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE); + } #endif if (comp->compile_error == MP_OBJ_NULL) { compile_scope_inline_asm(comp, s, MP_PASS_EMIT); } + } else #endif - - } else { + { // choose the emit type @@ -3433,11 +3528,10 @@ mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_f case MP_EMIT_OPT_NATIVE_PYTHON: case MP_EMIT_OPT_VIPER: if (emit_native == NULL) { - emit_native = NATIVE_EMITTER(new)(&comp->compile_error, max_num_labels); + emit_native = NATIVE_EMITTER(new)(&comp->compile_error, &comp->next_label, max_num_labels); } - comp->emit_method_table = &NATIVE_EMITTER(method_table); + comp->emit_method_table = NATIVE_EMITTER_TABLE; comp->emit = emit_native; - EMIT_ARG(set_native_type, MP_EMIT_NATIVE_TYPE_ENABLE, s->emit_options == MP_EMIT_OPT_VIPER, 0); break; #endif // MICROPY_EMIT_NATIVE diff --git a/python/src/py/compile.h b/python/src/py/compile.h index 3297e83ae..99a17a8d1 100644 --- a/python/src/py/compile.h +++ b/python/src/py/compile.h @@ -30,15 +30,6 @@ #include "py/parse.h" #include "py/emitglue.h" -// These must fit in 8 bits; see scope.h -enum { - MP_EMIT_OPT_NONE, - MP_EMIT_OPT_BYTECODE, - MP_EMIT_OPT_NATIVE_PYTHON, - MP_EMIT_OPT_VIPER, - MP_EMIT_OPT_ASM, -}; - // the compiler will raise an exception if an error occurred // the compiler will clear the parse tree before it returns mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, uint emit_opt, bool is_repl); diff --git a/python/src/py/emit.h b/python/src/py/emit.h index 270a40633..70f7bf122 100644 --- a/python/src/py/emit.h +++ b/python/src/py/emit.h @@ -51,21 +51,58 @@ typedef enum { #define MP_EMIT_BREAK_FROM_FOR (0x8000) -#define MP_EMIT_NATIVE_TYPE_ENABLE (0) -#define MP_EMIT_NATIVE_TYPE_RETURN (1) -#define MP_EMIT_NATIVE_TYPE_ARG (2) +// Kind for emit_id_ops->local() +#define MP_EMIT_IDOP_LOCAL_FAST (0) +#define MP_EMIT_IDOP_LOCAL_DEREF (1) + +// Kind for emit_id_ops->global() +#define MP_EMIT_IDOP_GLOBAL_NAME (0) +#define MP_EMIT_IDOP_GLOBAL_GLOBAL (1) + +// Kind for emit->import() +#define MP_EMIT_IMPORT_NAME (0) +#define MP_EMIT_IMPORT_FROM (1) +#define MP_EMIT_IMPORT_STAR (2) + +// Kind for emit->subscr() +#define MP_EMIT_SUBSCR_LOAD (0) +#define MP_EMIT_SUBSCR_STORE (1) +#define MP_EMIT_SUBSCR_DELETE (2) + +// Kind for emit->attr() +#define MP_EMIT_ATTR_LOAD (0) +#define MP_EMIT_ATTR_STORE (1) +#define MP_EMIT_ATTR_DELETE (2) + +// Kind for emit->setup_block() +#define MP_EMIT_SETUP_BLOCK_WITH 
(0) +#define MP_EMIT_SETUP_BLOCK_EXCEPT (2) +#define MP_EMIT_SETUP_BLOCK_FINALLY (3) + +// Kind for emit->build() +#define MP_EMIT_BUILD_TUPLE (0) +#define MP_EMIT_BUILD_LIST (1) +#define MP_EMIT_BUILD_MAP (3) +#define MP_EMIT_BUILD_SET (6) +#define MP_EMIT_BUILD_SLICE (8) + +// Kind for emit->yield() +#define MP_EMIT_YIELD_VALUE (0) +#define MP_EMIT_YIELD_FROM (1) typedef struct _emit_t emit_t; typedef struct _mp_emit_method_table_id_ops_t { - void (*fast)(emit_t *emit, qstr qst, mp_uint_t local_num); - void (*deref)(emit_t *emit, qstr qst, mp_uint_t local_num); - void (*name)(emit_t *emit, qstr qst); - void (*global)(emit_t *emit, qstr qst); + void (*local)(emit_t *emit, qstr qst, mp_uint_t local_num, int kind); + void (*global)(emit_t *emit, qstr qst, int kind); } mp_emit_method_table_id_ops_t; typedef struct _emit_method_table_t { - void (*set_native_type)(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2); + #if MICROPY_DYNAMIC_COMPILER + emit_t *(*emit_new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); + void (*emit_free)(emit_t *emit); + #endif + void (*start_pass)(emit_t *emit, pass_kind_t pass, scope_t *scope); void (*end_pass)(emit_t *emit); bool (*last_emit_was_return_value)(emit_t *emit); @@ -77,22 +114,16 @@ typedef struct _emit_method_table_t { mp_emit_method_table_id_ops_t delete_id; void (*label_assign)(emit_t *emit, mp_uint_t l); - void (*import_name)(emit_t *emit, qstr qst); - void (*import_from)(emit_t *emit, qstr qst); - void (*import_star)(emit_t *emit); + void (*import)(emit_t *emit, qstr qst, int kind); void (*load_const_tok)(emit_t *emit, mp_token_kind_t tok); void (*load_const_small_int)(emit_t *emit, mp_int_t arg); void (*load_const_str)(emit_t *emit, qstr qst); void (*load_const_obj)(emit_t *emit, mp_obj_t obj); void (*load_null)(emit_t *emit); - void (*load_attr)(emit_t *emit, qstr qst); void (*load_method)(emit_t *emit, qstr qst, bool is_super); void (*load_build_class)(emit_t *emit); - void (*load_subscr)(emit_t *emit); - void (*store_attr)(emit_t *emit, qstr qst); - void (*store_subscr)(emit_t *emit); - void (*delete_attr)(emit_t *emit, qstr qst); - void (*delete_subscr)(emit_t *emit); + void (*subscr)(emit_t *emit, int kind); + void (*attr)(emit_t *emit, qstr qst, int kind); void (*dup_top)(emit_t *emit); void (*dup_top_two)(emit_t *emit); void (*pop_top)(emit_t *emit); @@ -101,30 +132,18 @@ typedef struct _emit_method_table_t { void (*jump)(emit_t *emit, mp_uint_t label); void (*pop_jump_if)(emit_t *emit, bool cond, mp_uint_t label); void (*jump_if_or_pop)(emit_t *emit, bool cond, mp_uint_t label); - void (*break_loop)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth); - void (*continue_loop)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth); - void (*setup_with)(emit_t *emit, mp_uint_t label); + void (*unwind_jump)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth); + void (*setup_block)(emit_t *emit, mp_uint_t label, int kind); void (*with_cleanup)(emit_t *emit, mp_uint_t label); - void (*setup_except)(emit_t *emit, mp_uint_t label); - void (*setup_finally)(emit_t *emit, mp_uint_t label); void (*end_finally)(emit_t *emit); void (*get_iter)(emit_t *emit, bool use_stack); void (*for_iter)(emit_t *emit, mp_uint_t label); void (*for_iter_end)(emit_t *emit); - void (*pop_block)(emit_t *emit); - void (*pop_except)(emit_t *emit); + void (*pop_except_jump)(emit_t *emit, mp_uint_t label, bool within_exc_handler); void (*unary_op)(emit_t *emit, mp_unary_op_t op); void (*binary_op)(emit_t *emit, mp_binary_op_t op); - void 
(*build_tuple)(emit_t *emit, mp_uint_t n_args); - void (*build_list)(emit_t *emit, mp_uint_t n_args); - void (*build_map)(emit_t *emit, mp_uint_t n_args); + void (*build)(emit_t *emit, mp_uint_t n_args, int kind); void (*store_map)(emit_t *emit); - #if MICROPY_PY_BUILTINS_SET - void (*build_set)(emit_t *emit, mp_uint_t n_args); - #endif - #if MICROPY_PY_BUILTINS_SLICE - void (*build_slice)(emit_t *emit, mp_uint_t n_args); - #endif void (*store_comp)(emit_t *emit, scope_kind_t kind, mp_uint_t set_stack_index); void (*unpack_sequence)(emit_t *emit, mp_uint_t n_args); void (*unpack_ex)(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right); @@ -134,8 +153,7 @@ typedef struct _emit_method_table_t { void (*call_method)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags); void (*return_value)(emit_t *emit); void (*raise_varargs)(emit_t *emit, mp_uint_t n_args); - void (*yield_value)(emit_t *emit); - void (*yield_from)(emit_t *emit); + void (*yield)(emit_t *emit, int kind); // these methods are used to control entry to/exit from an exception handler // they may or may not emit code @@ -143,7 +161,10 @@ typedef struct _emit_method_table_t { void (*end_except_handler)(emit_t *emit); } emit_method_table_t; -void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst); +static inline void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst) { + scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT); +} + void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst); void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst); @@ -159,11 +180,11 @@ extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops; extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops; emit_t *emit_bc_new(void); -emit_t *emit_native_x64_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); -emit_t *emit_native_x86_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); -emit_t *emit_native_thumb_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); -emit_t *emit_native_arm_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); -emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, mp_uint_t max_num_labels); +emit_t *emit_native_x64_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); +emit_t *emit_native_x86_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); +emit_t *emit_native_thumb_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); +emit_t *emit_native_arm_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); +emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels); void emit_bc_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels); @@ -180,36 +201,24 @@ bool mp_emit_bc_last_emit_was_return_value(emit_t *emit); void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta); void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t line); -void mp_emit_bc_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_load_name(emit_t *emit, qstr qst); -void mp_emit_bc_load_global(emit_t *emit, qstr qst); -void mp_emit_bc_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_store_name(emit_t *emit, qstr qst); -void mp_emit_bc_store_global(emit_t *emit, qstr qst); -void 
mp_emit_bc_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num); -void mp_emit_bc_delete_name(emit_t *emit, qstr qst); -void mp_emit_bc_delete_global(emit_t *emit, qstr qst); +void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind); +void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind); +void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind); +void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind); +void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind); +void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind); void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l); -void mp_emit_bc_import_name(emit_t *emit, qstr qst); -void mp_emit_bc_import_from(emit_t *emit, qstr qst); -void mp_emit_bc_import_star(emit_t *emit); +void mp_emit_bc_import(emit_t *emit, qstr qst, int kind); void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok); void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg); void mp_emit_bc_load_const_str(emit_t *emit, qstr qst); void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj); void mp_emit_bc_load_null(emit_t *emit); -void mp_emit_bc_load_attr(emit_t *emit, qstr qst); void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super); void mp_emit_bc_load_build_class(emit_t *emit); -void mp_emit_bc_load_subscr(emit_t *emit); -void mp_emit_bc_store_attr(emit_t *emit, qstr qst); -void mp_emit_bc_store_subscr(emit_t *emit); -void mp_emit_bc_delete_attr(emit_t *emit, qstr qst); -void mp_emit_bc_delete_subscr(emit_t *emit); +void mp_emit_bc_subscr(emit_t *emit, int kind); +void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind); void mp_emit_bc_dup_top(emit_t *emit); void mp_emit_bc_dup_top_two(emit_t *emit); void mp_emit_bc_pop_top(emit_t *emit); @@ -219,30 +228,17 @@ void mp_emit_bc_jump(emit_t *emit, mp_uint_t label); void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label); void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label); void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth); -#define mp_emit_bc_break_loop mp_emit_bc_unwind_jump -#define mp_emit_bc_continue_loop mp_emit_bc_unwind_jump -void mp_emit_bc_setup_with(emit_t *emit, mp_uint_t label); +void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind); void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label); -void mp_emit_bc_setup_except(emit_t *emit, mp_uint_t label); -void mp_emit_bc_setup_finally(emit_t *emit, mp_uint_t label); void mp_emit_bc_end_finally(emit_t *emit); void mp_emit_bc_get_iter(emit_t *emit, bool use_stack); void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label); void mp_emit_bc_for_iter_end(emit_t *emit); -void mp_emit_bc_pop_block(emit_t *emit); -void mp_emit_bc_pop_except(emit_t *emit); +void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler); void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op); void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op); -void mp_emit_bc_build_tuple(emit_t *emit, mp_uint_t n_args); -void mp_emit_bc_build_list(emit_t *emit, mp_uint_t n_args); -void mp_emit_bc_build_map(emit_t *emit, mp_uint_t n_args); +void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind); void mp_emit_bc_store_map(emit_t *emit); -#if MICROPY_PY_BUILTINS_SET -void mp_emit_bc_build_set(emit_t *emit, mp_uint_t n_args); -#endif -#if MICROPY_PY_BUILTINS_SLICE -void 
mp_emit_bc_build_slice(emit_t *emit, mp_uint_t n_args); -#endif void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t list_stack_index); void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args); void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right); @@ -252,14 +248,18 @@ void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_ void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags); void mp_emit_bc_return_value(emit_t *emit); void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args); -void mp_emit_bc_yield_value(emit_t *emit); -void mp_emit_bc_yield_from(emit_t *emit); +void mp_emit_bc_yield(emit_t *emit, int kind); void mp_emit_bc_start_except_handler(emit_t *emit); void mp_emit_bc_end_except_handler(emit_t *emit); typedef struct _emit_inline_asm_t emit_inline_asm_t; typedef struct _emit_inline_asm_method_table_t { + #if MICROPY_DYNAMIC_COMPILER + emit_inline_asm_t *(*asm_new)(mp_uint_t max_num_labels); + void (*asm_free)(emit_inline_asm_t *emit); + #endif + void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot); void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig); mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params); diff --git a/python/src/py/emitbc.c b/python/src/py/emitbc.c index 32e833000..35eb6df9c 100644 --- a/python/src/py/emitbc.c +++ b/python/src/py/emitbc.c @@ -315,7 +315,7 @@ void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) { emit->last_source_line = 1; #ifndef NDEBUG // With debugging enabled labels are checked for unique assignment - if (pass < MP_PASS_EMIT) { + if (pass < MP_PASS_EMIT && emit->label_offsets != NULL) { memset(emit->label_offsets, -1, emit->max_num_labels * sizeof(mp_uint_t)); } #endif @@ -331,6 +331,10 @@ void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) { // the highest slot in the state (fastn[0], see vm.c). 
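// Orientation for the hunk below: n_state is the number of VM state slots
// for the scope (derived from the scope's locals plus its maximum Python
// stack depth), and it is written into the bytecode prelude via
// emit_write_code_info_uint() so the VM can size the frame. The new
// MICROPY_DEBUG_VM_STACK_OVERFLOW branch reserves one slot beyond the
// declared maximum; the assumption is that a debug build of the VM uses
// that spare slot to detect the stack pointer running past its limit,
// which the checking side of this diff does not itself show.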
n_state = 1; } + #if MICROPY_DEBUG_VM_STACK_OVERFLOW + // An extra slot in the stack is needed to detect VM stack overflow + n_state += 1; + #endif emit_write_code_info_uint(emit, n_state); emit_write_code_info_uint(emit, scope->exc_stack_size); } @@ -504,19 +508,19 @@ void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l) { } } -void mp_emit_bc_import_name(emit_t *emit, qstr qst) { - emit_bc_pre(emit, -1); - emit_write_bytecode_byte_qstr(emit, MP_BC_IMPORT_NAME, qst); -} - -void mp_emit_bc_import_from(emit_t *emit, qstr qst) { - emit_bc_pre(emit, 1); - emit_write_bytecode_byte_qstr(emit, MP_BC_IMPORT_FROM, qst); -} - -void mp_emit_bc_import_star(emit_t *emit) { - emit_bc_pre(emit, -1); - emit_write_bytecode_byte(emit, MP_BC_IMPORT_STAR); +void mp_emit_bc_import(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_BC_IMPORT_NAME + MP_EMIT_IMPORT_NAME == MP_BC_IMPORT_NAME); + MP_STATIC_ASSERT(MP_BC_IMPORT_NAME + MP_EMIT_IMPORT_FROM == MP_BC_IMPORT_FROM); + if (kind == MP_EMIT_IMPORT_FROM) { + emit_bc_pre(emit, 1); + } else { + emit_bc_pre(emit, -1); + } + if (kind == MP_EMIT_IMPORT_STAR) { + emit_write_bytecode_byte(emit, MP_BC_IMPORT_STAR); + } else { + emit_write_bytecode_byte_qstr(emit, MP_BC_IMPORT_NAME + kind, qst); + } } void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok) { @@ -556,43 +560,24 @@ void mp_emit_bc_load_null(emit_t *emit) { emit_write_bytecode_byte(emit, MP_BC_LOAD_NULL); } -void mp_emit_bc_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { +void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_LOAD_FAST_N); + MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_LOAD_DEREF); (void)qst; emit_bc_pre(emit, 1); - if (local_num <= 15) { + if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) { emit_write_bytecode_byte(emit, MP_BC_LOAD_FAST_MULTI + local_num); } else { - emit_write_bytecode_byte_uint(emit, MP_BC_LOAD_FAST_N, local_num); + emit_write_bytecode_byte_uint(emit, MP_BC_LOAD_FAST_N + kind, local_num); } } -void mp_emit_bc_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) { +void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_LOAD_NAME); + MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_LOAD_GLOBAL); (void)qst; emit_bc_pre(emit, 1); - emit_write_bytecode_byte_uint(emit, MP_BC_LOAD_DEREF, local_num); -} - -void mp_emit_bc_load_name(emit_t *emit, qstr qst) { - (void)qst; - emit_bc_pre(emit, 1); - emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_NAME, qst); - if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) { - emit_write_bytecode_byte(emit, 0); - } -} - -void mp_emit_bc_load_global(emit_t *emit, qstr qst) { - (void)qst; - emit_bc_pre(emit, 1); - emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_GLOBAL, qst); - if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) { - emit_write_bytecode_byte(emit, 0); - } -} - -void mp_emit_bc_load_attr(emit_t *emit, qstr qst) { - emit_bc_pre(emit, 0); - emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_ATTR, qst); + emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_NAME + kind, qst); if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) { emit_write_bytecode_byte(emit, 0); } @@ -608,80 +593,68 @@ void mp_emit_bc_load_build_class(emit_t *emit) { emit_write_bytecode_byte(emit, MP_BC_LOAD_BUILD_CLASS); } -void mp_emit_bc_load_subscr(emit_t *emit) { - emit_bc_pre(emit, -1); - 
emit_write_bytecode_byte(emit, MP_BC_LOAD_SUBSCR); -} - -void mp_emit_bc_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { - (void)qst; - emit_bc_pre(emit, -1); - if (local_num <= 15) { - emit_write_bytecode_byte(emit, MP_BC_STORE_FAST_MULTI + local_num); +void mp_emit_bc_subscr(emit_t *emit, int kind) { + if (kind == MP_EMIT_SUBSCR_LOAD) { + emit_bc_pre(emit, -1); + emit_write_bytecode_byte(emit, MP_BC_LOAD_SUBSCR); } else { - emit_write_bytecode_byte_uint(emit, MP_BC_STORE_FAST_N, local_num); + if (kind == MP_EMIT_SUBSCR_DELETE) { + mp_emit_bc_load_null(emit); + mp_emit_bc_rot_three(emit); + } + emit_bc_pre(emit, -3); + emit_write_bytecode_byte(emit, MP_BC_STORE_SUBSCR); } } -void mp_emit_bc_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) { - (void)qst; - emit_bc_pre(emit, -1); - emit_write_bytecode_byte_uint(emit, MP_BC_STORE_DEREF, local_num); -} - -void mp_emit_bc_store_name(emit_t *emit, qstr qst) { - emit_bc_pre(emit, -1); - emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_NAME, qst); -} - -void mp_emit_bc_store_global(emit_t *emit, qstr qst) { - emit_bc_pre(emit, -1); - emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_GLOBAL, qst); -} - -void mp_emit_bc_store_attr(emit_t *emit, qstr qst) { - emit_bc_pre(emit, -2); - emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_ATTR, qst); +void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind) { + if (kind == MP_EMIT_ATTR_LOAD) { + emit_bc_pre(emit, 0); + emit_write_bytecode_byte_qstr(emit, MP_BC_LOAD_ATTR, qst); + } else { + if (kind == MP_EMIT_ATTR_DELETE) { + mp_emit_bc_load_null(emit); + mp_emit_bc_rot_two(emit); + } + emit_bc_pre(emit, -2); + emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_ATTR, qst); + } if (MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE_DYNAMIC) { emit_write_bytecode_byte(emit, 0); } } -void mp_emit_bc_store_subscr(emit_t *emit) { - emit_bc_pre(emit, -3); - emit_write_bytecode_byte(emit, MP_BC_STORE_SUBSCR); -} - -void mp_emit_bc_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { +void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_STORE_FAST_N); + MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_STORE_DEREF); (void)qst; - emit_write_bytecode_byte_uint(emit, MP_BC_DELETE_FAST, local_num); + emit_bc_pre(emit, -1); + if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) { + emit_write_bytecode_byte(emit, MP_BC_STORE_FAST_MULTI + local_num); + } else { + emit_write_bytecode_byte_uint(emit, MP_BC_STORE_FAST_N + kind, local_num); + } } -void mp_emit_bc_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num) { +void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_STORE_NAME); + MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_STORE_GLOBAL); + emit_bc_pre(emit, -1); + emit_write_bytecode_byte_qstr(emit, MP_BC_STORE_NAME + kind, qst); +} + +void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_DELETE_FAST); + MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_DELETE_DEREF); (void)qst; - emit_write_bytecode_byte_uint(emit, MP_BC_DELETE_DEREF, local_num); + emit_write_bytecode_byte_uint(emit, MP_BC_DELETE_FAST + kind, local_num); } -void mp_emit_bc_delete_name(emit_t *emit, qstr qst) { +void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind) { + 
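// The pattern behind these MP_STATIC_ASSERTs, used for every collapsed
// emitter entry point in this refactor: related opcodes occupy consecutive
// values in the bytecode table, so "base opcode + kind" selects the right
// one, and the asserts pin that layout down at compile time. A
// self-contained sketch with made-up opcode values (not the real ones):
//
//     enum { BC_LOAD_NAME = 0x11, BC_LOAD_GLOBAL = 0x12 };    // hypothetical
//     enum { IDOP_GLOBAL_NAME = 0, IDOP_GLOBAL_GLOBAL = 1 };
//     MP_STATIC_ASSERT(BC_LOAD_NAME + IDOP_GLOBAL_GLOBAL == BC_LOAD_GLOBAL);
//     emit_write_bytecode_byte_qstr(emit, BC_LOAD_NAME + kind, qst);
//
// The same trick even encodes stack effects: in mp_emit_bc_yield further
// below, MP_EMIT_YIELD_VALUE (0) leaves the stack depth unchanged while
// MP_EMIT_YIELD_FROM (1) nets one pop, hence emit_bc_pre(emit, -kind).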
MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_DELETE_NAME); + MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_DELETE_GLOBAL); emit_bc_pre(emit, 0); - emit_write_bytecode_byte_qstr(emit, MP_BC_DELETE_NAME, qst); -} - -void mp_emit_bc_delete_global(emit_t *emit, qstr qst) { - emit_bc_pre(emit, 0); - emit_write_bytecode_byte_qstr(emit, MP_BC_DELETE_GLOBAL, qst); -} - -void mp_emit_bc_delete_attr(emit_t *emit, qstr qst) { - mp_emit_bc_load_null(emit); - mp_emit_bc_rot_two(emit); - mp_emit_bc_store_attr(emit, qst); -} - -void mp_emit_bc_delete_subscr(emit_t *emit) { - mp_emit_bc_load_null(emit); - mp_emit_bc_rot_three(emit); - mp_emit_bc_store_subscr(emit); + emit_write_bytecode_byte_qstr(emit, MP_BC_DELETE_NAME + kind, qst); } void mp_emit_bc_dup_top(emit_t *emit) { @@ -750,30 +723,26 @@ void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_dept } } -void mp_emit_bc_setup_with(emit_t *emit, mp_uint_t label) { +void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind) { + MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_WITH == MP_BC_SETUP_WITH); + MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_EXCEPT == MP_BC_SETUP_EXCEPT); + MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_FINALLY == MP_BC_SETUP_FINALLY); + if (kind == MP_EMIT_SETUP_BLOCK_WITH) { // The SETUP_WITH opcode pops ctx_mgr from the top of the stack // and then pushes 3 entries: __exit__, ctx_mgr, as_value. - emit_bc_pre(emit, 2); - emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_WITH, label); + emit_bc_pre(emit, 2); + } else { + emit_bc_pre(emit, 0); + } + emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_WITH + kind, label); } void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label) { - mp_emit_bc_pop_block(emit); mp_emit_bc_load_const_tok(emit, MP_TOKEN_KW_NONE); mp_emit_bc_label_assign(emit, label); emit_bc_pre(emit, 2); // ensure we have enough stack space to call the __exit__ method emit_write_bytecode_byte(emit, MP_BC_WITH_CLEANUP); - emit_bc_pre(emit, -4); // cancel the 2 above, plus the 2 from mp_emit_bc_setup_with -} - -void mp_emit_bc_setup_except(emit_t *emit, mp_uint_t label) { - emit_bc_pre(emit, 0); - emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_EXCEPT, label); -} - -void mp_emit_bc_setup_finally(emit_t *emit, mp_uint_t label) { - emit_bc_pre(emit, 0); - emit_write_bytecode_byte_unsigned_label(emit, MP_BC_SETUP_FINALLY, label); + emit_bc_pre(emit, -4); // cancel the 2 above, plus the 2 from mp_emit_bc_setup_block(MP_EMIT_SETUP_BLOCK_WITH) } void mp_emit_bc_end_finally(emit_t *emit) { @@ -795,14 +764,10 @@ void mp_emit_bc_for_iter_end(emit_t *emit) { emit_bc_pre(emit, -MP_OBJ_ITER_BUF_NSLOTS); } -void mp_emit_bc_pop_block(emit_t *emit) { +void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) { + (void)within_exc_handler; emit_bc_pre(emit, 0); - emit_write_bytecode_byte(emit, MP_BC_POP_BLOCK); -} - -void mp_emit_bc_pop_except(emit_t *emit) { - emit_bc_pre(emit, 0); - emit_write_bytecode_byte(emit, MP_BC_POP_EXCEPT); + emit_write_bytecode_byte_unsigned_label(emit, MP_BC_POP_EXCEPT_JUMP, label); } void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op) { @@ -827,19 +792,18 @@ void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op) { } } -void mp_emit_bc_build_tuple(emit_t *emit, mp_uint_t n_args) { - emit_bc_pre(emit, 1 - n_args); - emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_TUPLE, n_args); -} - -void mp_emit_bc_build_list(emit_t *emit, 
mp_uint_t n_args) { - emit_bc_pre(emit, 1 - n_args); - emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_LIST, n_args); -} - -void mp_emit_bc_build_map(emit_t *emit, mp_uint_t n_args) { - emit_bc_pre(emit, 1); - emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_MAP, n_args); +void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind) { + MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_BC_BUILD_TUPLE); + MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_BC_BUILD_LIST); + MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_BC_BUILD_MAP); + MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_BC_BUILD_SET); + MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SLICE == MP_BC_BUILD_SLICE); + if (kind == MP_EMIT_BUILD_MAP) { + emit_bc_pre(emit, 1); + } else { + emit_bc_pre(emit, 1 - n_args); + } + emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_TUPLE + kind, n_args); } void mp_emit_bc_store_map(emit_t *emit) { @@ -847,20 +811,6 @@ void mp_emit_bc_store_map(emit_t *emit) { emit_write_bytecode_byte(emit, MP_BC_STORE_MAP); } -#if MICROPY_PY_BUILTINS_SET -void mp_emit_bc_build_set(emit_t *emit, mp_uint_t n_args) { - emit_bc_pre(emit, 1 - n_args); - emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_SET, n_args); -} -#endif - -#if MICROPY_PY_BUILTINS_SLICE -void mp_emit_bc_build_slice(emit_t *emit, mp_uint_t n_args) { - emit_bc_pre(emit, 1 - n_args); - emit_write_bytecode_byte_uint(emit, MP_BC_BUILD_SLICE, n_args); -} -#endif - void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t collection_stack_index) { int t; int n; @@ -942,16 +892,11 @@ void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args) { emit_write_bytecode_byte_byte(emit, MP_BC_RAISE_VARARGS, n_args); } -void mp_emit_bc_yield_value(emit_t *emit) { - emit_bc_pre(emit, 0); +void mp_emit_bc_yield(emit_t *emit, int kind) { + MP_STATIC_ASSERT(MP_BC_YIELD_VALUE + 1 == MP_BC_YIELD_FROM); + emit_bc_pre(emit, -kind); emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR; - emit_write_bytecode_byte(emit, MP_BC_YIELD_VALUE); -} - -void mp_emit_bc_yield_from(emit_t *emit) { - emit_bc_pre(emit, -1); - emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR; - emit_write_bytecode_byte(emit, MP_BC_YIELD_FROM); + emit_write_bytecode_byte(emit, MP_BC_YIELD_VALUE + kind); } void mp_emit_bc_start_except_handler(emit_t *emit) { @@ -964,7 +909,11 @@ void mp_emit_bc_end_except_handler(emit_t *emit) { #if MICROPY_EMIT_NATIVE const emit_method_table_t emit_bc_method_table = { - NULL, // set_native_type is never called when emitting bytecode + #if MICROPY_DYNAMIC_COMPILER + NULL, + NULL, + #endif + mp_emit_bc_start_pass, mp_emit_bc_end_pass, mp_emit_bc_last_emit_was_return_value, @@ -972,41 +921,29 @@ const emit_method_table_t emit_bc_method_table = { mp_emit_bc_set_source_line, { - mp_emit_bc_load_fast, - mp_emit_bc_load_deref, - mp_emit_bc_load_name, + mp_emit_bc_load_local, mp_emit_bc_load_global, }, { - mp_emit_bc_store_fast, - mp_emit_bc_store_deref, - mp_emit_bc_store_name, + mp_emit_bc_store_local, mp_emit_bc_store_global, }, { - mp_emit_bc_delete_fast, - mp_emit_bc_delete_deref, - mp_emit_bc_delete_name, + mp_emit_bc_delete_local, mp_emit_bc_delete_global, }, mp_emit_bc_label_assign, - mp_emit_bc_import_name, - mp_emit_bc_import_from, - mp_emit_bc_import_star, + mp_emit_bc_import, mp_emit_bc_load_const_tok, mp_emit_bc_load_const_small_int, mp_emit_bc_load_const_str, mp_emit_bc_load_const_obj, mp_emit_bc_load_null, - mp_emit_bc_load_attr, mp_emit_bc_load_method, mp_emit_bc_load_build_class, - 
mp_emit_bc_load_subscr, - mp_emit_bc_store_attr, - mp_emit_bc_store_subscr, - mp_emit_bc_delete_attr, - mp_emit_bc_delete_subscr, + mp_emit_bc_subscr, + mp_emit_bc_attr, mp_emit_bc_dup_top, mp_emit_bc_dup_top_two, mp_emit_bc_pop_top, @@ -1016,29 +953,17 @@ const emit_method_table_t emit_bc_method_table = { mp_emit_bc_pop_jump_if, mp_emit_bc_jump_if_or_pop, mp_emit_bc_unwind_jump, - mp_emit_bc_unwind_jump, - mp_emit_bc_setup_with, + mp_emit_bc_setup_block, mp_emit_bc_with_cleanup, - mp_emit_bc_setup_except, - mp_emit_bc_setup_finally, mp_emit_bc_end_finally, mp_emit_bc_get_iter, mp_emit_bc_for_iter, mp_emit_bc_for_iter_end, - mp_emit_bc_pop_block, - mp_emit_bc_pop_except, + mp_emit_bc_pop_except_jump, mp_emit_bc_unary_op, mp_emit_bc_binary_op, - mp_emit_bc_build_tuple, - mp_emit_bc_build_list, - mp_emit_bc_build_map, + mp_emit_bc_build, mp_emit_bc_store_map, - #if MICROPY_PY_BUILTINS_SET - mp_emit_bc_build_set, - #endif - #if MICROPY_PY_BUILTINS_SLICE - mp_emit_bc_build_slice, - #endif mp_emit_bc_store_comp, mp_emit_bc_unpack_sequence, mp_emit_bc_unpack_ex, @@ -1048,31 +973,24 @@ const emit_method_table_t emit_bc_method_table = { mp_emit_bc_call_method, mp_emit_bc_return_value, mp_emit_bc_raise_varargs, - mp_emit_bc_yield_value, - mp_emit_bc_yield_from, + mp_emit_bc_yield, mp_emit_bc_start_except_handler, mp_emit_bc_end_except_handler, }; #else const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops = { - mp_emit_bc_load_fast, - mp_emit_bc_load_deref, - mp_emit_bc_load_name, + mp_emit_bc_load_local, mp_emit_bc_load_global, }; const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops = { - mp_emit_bc_store_fast, - mp_emit_bc_store_deref, - mp_emit_bc_store_name, + mp_emit_bc_store_local, mp_emit_bc_store_global, }; const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops = { - mp_emit_bc_delete_fast, - mp_emit_bc_delete_deref, - mp_emit_bc_delete_name, + mp_emit_bc_delete_local, mp_emit_bc_delete_global, }; #endif diff --git a/python/src/py/emitcommon.c b/python/src/py/emitcommon.c index 07b1dbb4c..791bf398a 100644 --- a/python/src/py/emitcommon.c +++ b/python/src/py/emitcommon.c @@ -30,26 +30,10 @@ #if MICROPY_ENABLE_COMPILER -void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst) { - // name adding/lookup - bool added; - id_info_t *id = scope_find_or_add_id(scope, qst, &added); - if (added) { - scope_find_local_and_close_over(scope, id, qst); - } -} - void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst) { // name adding/lookup - bool added; - id_info_t *id = scope_find_or_add_id(scope, qst, &added); - if (added) { - if (SCOPE_IS_FUNC_LIKE(scope->kind)) { - id->kind = ID_INFO_KIND_LOCAL; - } else { - id->kind = ID_INFO_KIND_GLOBAL_IMPLICIT; - } - } else if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { + id_info_t *id = scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT); + if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { // rebind as a local variable id->kind = ID_INFO_KIND_LOCAL; } @@ -63,14 +47,14 @@ void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emi // call the emit backend with the correct code if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) { - emit_method_table->name(emit, qst); + emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_NAME); } else if (id->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) { - emit_method_table->global(emit, qst); + emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL); } 
else if (id->kind == ID_INFO_KIND_LOCAL) { - emit_method_table->fast(emit, qst, id->local_num); + emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_FAST); } else { assert(id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE); - emit_method_table->deref(emit, qst, id->local_num); + emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_DEREF); } } diff --git a/python/src/py/emitglue.c b/python/src/py/emitglue.c index 74bf8ddca..c073258f0 100644 --- a/python/src/py/emitglue.c +++ b/python/src/py/emitglue.c @@ -67,15 +67,18 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, rc->kind = MP_CODE_BYTECODE; rc->scope_flags = scope_flags; - rc->data.u_byte.bytecode = code; - rc->data.u_byte.const_table = const_table; + rc->fun_data = code; + rc->const_table = const_table; #if MICROPY_PERSISTENT_CODE_SAVE - rc->data.u_byte.bc_len = len; - rc->data.u_byte.n_obj = n_obj; - rc->data.u_byte.n_raw_code = n_raw_code; + rc->fun_data_len = len; + rc->n_obj = n_obj; + rc->n_raw_code = n_raw_code; #endif #ifdef DEBUG_PRINT + #if !MICROPY_DEBUG_PRINTERS + const size_t len = 0; + #endif DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags); #endif #if MICROPY_DEBUG_PRINTERS @@ -86,14 +89,31 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, } #if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM -void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) { +void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + uint16_t prelude_offset, + uint16_t n_obj, uint16_t n_raw_code, + uint16_t n_qstr, mp_qstr_link_entry_t *qstr_link, + #endif + mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) { + assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM); + rc->kind = kind; rc->scope_flags = scope_flags; rc->n_pos_args = n_pos_args; - rc->data.u_native.fun_data = fun_data; - rc->data.u_native.const_table = const_table; - rc->data.u_native.type_sig = type_sig; + rc->fun_data = fun_data; + rc->const_table = const_table; + rc->type_sig = type_sig; + + #if MICROPY_PERSISTENT_CODE_SAVE + rc->fun_data_len = fun_len; + rc->prelude_offset = prelude_offset; + rc->n_obj = n_obj; + rc->n_raw_code = n_raw_code; + rc->n_qstr= n_qstr; + rc->qstr_link = qstr_link; + #endif #ifdef DEBUG_PRINT DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, n_pos_args, (uint)scope_flags); @@ -121,39 +141,40 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar assert(rc != NULL); // def_args must be MP_OBJ_NULL or a tuple - assert(def_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_args, &mp_type_tuple)); + assert(def_args == MP_OBJ_NULL || mp_obj_is_type(def_args, &mp_type_tuple)); // def_kw_args must be MP_OBJ_NULL or a dict - assert(def_kw_args == MP_OBJ_NULL || MP_OBJ_IS_TYPE(def_kw_args, &mp_type_dict)); + assert(def_kw_args == MP_OBJ_NULL || mp_obj_is_type(def_kw_args, &mp_type_dict)); // make the function, depending on the raw code kind mp_obj_t fun; switch (rc->kind) { #if MICROPY_EMIT_NATIVE case MP_CODE_NATIVE_PY: - fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->data.u_native.fun_data, 
rc->data.u_native.const_table); - break; case MP_CODE_NATIVE_VIPER: - fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); + fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->fun_data, rc->const_table); + // Check for a generator function, and if so change the type of the object + if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) { + ((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_native_gen_wrap; + } break; #endif #if MICROPY_EMIT_INLINE_ASM case MP_CODE_NATIVE_ASM: - fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig); + fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->fun_data, rc->type_sig); break; #endif default: // rc->kind should always be set and BYTECODE is the only remaining case assert(rc->kind == MP_CODE_BYTECODE); - fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->data.u_byte.bytecode, rc->data.u_byte.const_table); + fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->fun_data, rc->const_table); + // check for generator functions and if so change the type of the object + if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) { + ((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap; + } break; } - // check for generator functions and if so wrap in generator object - if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) { - fun = mp_obj_new_gen_wrap(fun); - } - return fun; } diff --git a/python/src/py/emitglue.h b/python/src/py/emitglue.h index 0830a0d5c..058f06018 100644 --- a/python/src/py/emitglue.h +++ b/python/src/py/emitglue.h @@ -30,6 +30,15 @@ // These variables and functions glue the code emitters to the runtime. +// These must fit in 8 bits; see scope.h +enum { + MP_EMIT_OPT_NONE, + MP_EMIT_OPT_BYTECODE, + MP_EMIT_OPT_NATIVE_PYTHON, + MP_EMIT_OPT_VIPER, + MP_EMIT_OPT_ASM, +}; + typedef enum { MP_CODE_UNUSED, MP_CODE_RESERVED, @@ -39,26 +48,30 @@ typedef enum { MP_CODE_NATIVE_ASM, } mp_raw_code_kind_t; +typedef struct _mp_qstr_link_entry_t { + uint16_t off; + uint16_t qst; +} mp_qstr_link_entry_t; + typedef struct _mp_raw_code_t { mp_uint_t kind : 3; // of type mp_raw_code_kind_t mp_uint_t scope_flags : 7; mp_uint_t n_pos_args : 11; - union { - struct { - const byte *bytecode; - const mp_uint_t *const_table; - #if MICROPY_PERSISTENT_CODE_SAVE - mp_uint_t bc_len; - uint16_t n_obj; - uint16_t n_raw_code; - #endif - } u_byte; - struct { - void *fun_data; - const mp_uint_t *const_table; - mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc - } u_native; - } data; + const void *fun_data; + const mp_uint_t *const_table; + #if MICROPY_PERSISTENT_CODE_SAVE + size_t fun_data_len; + uint16_t n_obj; + uint16_t n_raw_code; + #if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM + uint16_t prelude_offset; + uint16_t n_qstr; + mp_qstr_link_entry_t *qstr_link; + #endif + #endif + #if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM + mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc + #endif } mp_raw_code_t; mp_raw_code_t *mp_emit_glue_new_raw_code(void); @@ -72,7 +85,15 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, uint16_t n_obj, uint16_t n_raw_code, #endif mp_uint_t scope_flags); -void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table, mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig); + +void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, 
void *fun_data, mp_uint_t fun_len, + const mp_uint_t *const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + uint16_t prelude_offset, + uint16_t n_obj, uint16_t n_raw_code, + uint16_t n_qstr, mp_qstr_link_entry_t *qstr_link, + #endif + mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig); mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args); mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args); diff --git a/python/src/py/emitinlinethumb.c b/python/src/py/emitinlinethumb.c index 577f65672..020dfc815 100644 --- a/python/src/py/emitinlinethumb.c +++ b/python/src/py/emitinlinethumb.c @@ -301,7 +301,7 @@ STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node } uint32_t i = mp_obj_get_int_truncated(o); if ((i & (~fit_mask)) != 0) { - emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer 0x%x does not fit in mask 0x%x", op, i, fit_mask)); + emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer 0x%x doesn't fit in mask 0x%x", op, i, fit_mask)); return 0; } return i; @@ -812,6 +812,11 @@ branch_not_in_range: } const emit_inline_asm_method_table_t emit_inline_thumb_method_table = { + #if MICROPY_DYNAMIC_COMPILER + emit_inline_thumb_new, + emit_inline_thumb_free, + #endif + emit_inline_thumb_start_pass, emit_inline_thumb_end_pass, emit_inline_thumb_count_params, diff --git a/python/src/py/emitinlinextensa.c b/python/src/py/emitinlinextensa.c index 3d3217f5b..d10179127 100644 --- a/python/src/py/emitinlinextensa.c +++ b/python/src/py/emitinlinextensa.c @@ -171,7 +171,7 @@ STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node } uint32_t i = mp_obj_get_int_truncated(o); if (min != max && ((int)i < min || (int)i > max)) { - emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer %d is not within range %d..%d", op, i, min, max)); + emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer %d isn't within range %d..%d", op, i, min, max)); return 0; } return i; @@ -335,6 +335,11 @@ branch_not_in_range: } const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = { + #if MICROPY_DYNAMIC_COMPILER + emit_inline_xtensa_new, + emit_inline_xtensa_free, + #endif + emit_inline_xtensa_start_pass, emit_inline_xtensa_end_pass, emit_inline_xtensa_count_params, diff --git a/python/src/py/emitnarm.c b/python/src/py/emitnarm.c index 1b585f821..8297ad619 100644 --- a/python/src/py/emitnarm.c +++ b/python/src/py/emitnarm.c @@ -8,6 +8,11 @@ #define GENERIC_ASM_API (1) #include "py/asmarm.h" +// Word indices of REG_LOCAL_x in nlr_buf_t +#define NLR_BUF_IDX_LOCAL_1 (3) // r4 +#define NLR_BUF_IDX_LOCAL_2 (4) // r5 +#define NLR_BUF_IDX_LOCAL_3 (5) // r6 + #define N_ARM (1) #define EXPORT_FUN(name) emit_native_arm_##name #include "py/emitnative.c" diff --git a/python/src/py/emitnative.c b/python/src/py/emitnative.c index 7e017ba39..f123ecbb5 100644 --- a/python/src/py/emitnative.c +++ b/python/src/py/emitnative.c @@ -59,12 +59,54 @@ // wrapper around everything in this file #if N_X64 || N_X86 || N_THUMB || N_ARM || N_XTENSA -// define additional generic helper macros -#define ASM_MOV_LOCAL_IMM_VIA(as, local_num, imm, reg_temp) \ - do { \ - ASM_MOV_REG_IMM((as), (reg_temp), (imm)); \ - ASM_MOV_LOCAL_REG((as), (local_num), (reg_temp)); \ - } while (false) +// C stack layout 
for native functions: +// 0: nlr_buf_t [optional] +// emit->code_state_start: mp_code_state_t +// emit->stack_start: Python object stack | emit->n_state +// locals (reversed, L0 at end) | +// +// C stack layout for native generator functions: +// 0=emit->stack_start: nlr_buf_t +// +// Then REG_GENERATOR_STATE points to: +// 0=emit->code_state_start: mp_code_state_t +// emit->stack_start: Python object stack | emit->n_state +// locals (reversed, L0 at end) | +// +// C stack layout for viper functions: +// 0: nlr_buf_t [optional] +// emit->code_state_start: fun_obj, old_globals [optional] +// emit->stack_start: Python object stack | emit->n_state +// locals (reversed, L0 at end) | +// (L0-L2 may be in regs instead) + +// Word index of nlr_buf_t.ret_val +#define NLR_BUF_IDX_RET_VAL (1) + +// Whether the viper function needs access to fun_obj +#define NEED_FUN_OBJ(emit) ((emit)->scope->exc_stack_size > 0 \ + || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS))) + +// Whether the native/viper function needs to be wrapped in an exception handler +#define NEED_GLOBAL_EXC_HANDLER(emit) ((emit)->scope->exc_stack_size > 0 \ + || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_GENERATOR | MP_SCOPE_FLAG_REFGLOBALS))) + +// Whether registers can be used to store locals (only true if there are no +// exception handlers, because otherwise an nlr_jump will restore registers to +// their state at the start of the function and updates to locals will be lost) +#define CAN_USE_REGS_FOR_LOCALS(emit) ((emit)->scope->exc_stack_size == 0 && !(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) + +// Indices within the local C stack for various variables +#define LOCAL_IDX_EXC_VAL(emit) (NLR_BUF_IDX_RET_VAL) +#define LOCAL_IDX_EXC_HANDLER_PC(emit) (NLR_BUF_IDX_LOCAL_1) +#define LOCAL_IDX_EXC_HANDLER_UNWIND(emit) (NLR_BUF_IDX_LOCAL_2) +#define LOCAL_IDX_RET_VAL(emit) (NLR_BUF_IDX_LOCAL_3) +#define LOCAL_IDX_FUN_OBJ(emit) ((emit)->code_state_start + offsetof(mp_code_state_t, fun_bc) / sizeof(uintptr_t)) +#define LOCAL_IDX_OLD_GLOBALS(emit) ((emit)->code_state_start + offsetof(mp_code_state_t, ip) / sizeof(uintptr_t)) +#define LOCAL_IDX_GEN_PC(emit) ((emit)->code_state_start + offsetof(mp_code_state_t, ip) / sizeof(uintptr_t)) +#define LOCAL_IDX_LOCAL_VAR(emit, local_num) ((emit)->stack_start + (emit)->n_state - 1 - (local_num)) + +#define REG_GENERATOR_STATE (REG_LOCAL_3) #define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) 
do { \ *emit->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \ @@ -117,14 +159,24 @@ typedef struct _stack_info_t { } data; } stack_info_t; +#define UNWIND_LABEL_UNUSED (0x7fff) +#define UNWIND_LABEL_DO_FINAL_UNWIND (0x7ffe) + +typedef struct _exc_stack_entry_t { + uint16_t label : 15; + uint16_t is_finally : 1; + uint16_t unwind_label : 15; + uint16_t is_active : 1; +} exc_stack_entry_t; + struct _emit_t { mp_obj_t *error_slot; + uint *label_slot; + uint exit_label; int pass; bool do_viper_types; - vtype_kind_t return_vtype; - mp_uint_t local_vtype_alloc; vtype_kind_t *local_vtype; @@ -132,12 +184,27 @@ struct _emit_t { stack_info_t *stack_info; vtype_kind_t saved_stack_vtype; + size_t exc_stack_alloc; + size_t exc_stack_size; + exc_stack_entry_t *exc_stack; + int prelude_offset; - int const_table_offset; + int start_offset; int n_state; - int stack_start; + uint16_t code_state_start; + uint16_t stack_start; int stack_size; + uint16_t const_table_cur_obj; + uint16_t const_table_num_obj; + uint16_t const_table_cur_raw_code; + mp_uint_t *const_table; + + #if MICROPY_PERSISTENT_CODE_SAVE + uint16_t qstr_link_cur; + mp_qstr_link_entry_t *qstr_link; + #endif + bool last_emit_was_return_value; scope_t *scope; @@ -145,9 +212,20 @@ struct _emit_t { ASM_T *as; }; -emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, mp_uint_t max_num_labels) { +STATIC const uint8_t reg_local_table[REG_LOCAL_NUM] = {REG_LOCAL_1, REG_LOCAL_2, REG_LOCAL_3}; + +STATIC void emit_native_global_exc_entry(emit_t *emit); +STATIC void emit_native_global_exc_exit(emit_t *emit); +STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj); + +emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels) { emit_t *emit = m_new0(emit_t, 1); emit->error_slot = error_slot; + emit->label_slot = label_slot; + emit->stack_info_alloc = 8; + emit->stack_info = m_new(stack_info_t, emit->stack_info_alloc); + emit->exc_stack_alloc = 8; + emit->exc_stack = m_new(exc_stack_entry_t, emit->exc_stack_alloc); emit->as = m_new0(ASM_T, 1); mp_asm_base_init(&emit->as->base, max_num_labels); return emit; @@ -156,54 +234,86 @@ emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, mp_uint_t max_num_labels) { void EXPORT_FUN(free)(emit_t *emit) { mp_asm_base_deinit(&emit->as->base, false); m_del_obj(ASM_T, emit->as); + m_del(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc); m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc); m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc); m_del_obj(emit_t, emit); } -STATIC void emit_native_set_native_type(emit_t *emit, mp_uint_t op, mp_uint_t arg1, qstr arg2) { - switch (op) { - case MP_EMIT_NATIVE_TYPE_ENABLE: - emit->do_viper_types = arg1; - break; +STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg); - default: { - vtype_kind_t type; - switch (arg2) { - case MP_QSTR_object: type = VTYPE_PYOBJ; break; - case MP_QSTR_bool: type = VTYPE_BOOL; break; - case MP_QSTR_int: type = VTYPE_INT; break; - case MP_QSTR_uint: type = VTYPE_UINT; break; - case MP_QSTR_ptr: type = VTYPE_PTR; break; - case MP_QSTR_ptr8: type = VTYPE_PTR8; break; - case MP_QSTR_ptr16: type = VTYPE_PTR16; break; - case MP_QSTR_ptr32: type = VTYPE_PTR32; break; - default: EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "unknown type '%q'", arg2); return; - } - if (op == MP_EMIT_NATIVE_TYPE_RETURN) { - emit->return_vtype = type; - } else { - assert(arg1 < emit->local_vtype_alloc); - emit->local_vtype[arg1] = type; - } - 
break; - } +STATIC void emit_native_mov_reg_const(emit_t *emit, int reg_dest, int const_val) { + ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_FUN_TABLE, const_val); +} + +STATIC void emit_native_mov_state_reg(emit_t *emit, int local_num, int reg_src) { + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, REG_GENERATOR_STATE, local_num); + } else { + ASM_MOV_LOCAL_REG(emit->as, local_num, reg_src); } } -STATIC void emit_pre_pop_reg(emit_t *emit, vtype_kind_t *vtype, int reg_dest); -STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg); -STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num); -STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num); +STATIC void emit_native_mov_reg_state(emit_t *emit, int reg_dest, int local_num) { + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_GENERATOR_STATE, local_num); + } else { + ASM_MOV_REG_LOCAL(emit->as, reg_dest, local_num); + } +} -#define STATE_START (sizeof(mp_code_state_t) / sizeof(mp_uint_t)) +STATIC void emit_native_mov_reg_state_addr(emit_t *emit, int reg_dest, int local_num) { + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + ASM_MOV_REG_IMM(emit->as, reg_dest, local_num * ASM_WORD_SIZE); + ASM_ADD_REG_REG(emit->as, reg_dest, REG_GENERATOR_STATE); + } else { + ASM_MOV_REG_LOCAL_ADDR(emit->as, reg_dest, local_num); + } +} + +STATIC void emit_native_mov_reg_qstr(emit_t *emit, int arg_reg, qstr qst) { + #if MICROPY_PERSISTENT_CODE_SAVE + size_t loc = ASM_MOV_REG_IMM_FIX_U16(emit->as, arg_reg, qst); + size_t link_idx = emit->qstr_link_cur++; + if (emit->pass == MP_PASS_EMIT) { + emit->qstr_link[link_idx].off = loc << 2 | 1; + emit->qstr_link[link_idx].qst = qst; + } + #else + ASM_MOV_REG_IMM(emit->as, arg_reg, qst); + #endif +} + +STATIC void emit_native_mov_reg_qstr_obj(emit_t *emit, int reg_dest, qstr qst) { + #if MICROPY_PERSISTENT_CODE_SAVE + size_t loc = ASM_MOV_REG_IMM_FIX_WORD(emit->as, reg_dest, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); + size_t link_idx = emit->qstr_link_cur++; + if (emit->pass == MP_PASS_EMIT) { + emit->qstr_link[link_idx].off = loc << 2 | 2; + emit->qstr_link[link_idx].qst = qst; + } + #else + ASM_MOV_REG_IMM(emit->as, reg_dest, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); + #endif +} + +#define emit_native_mov_state_imm_via(emit, local_num, imm, reg_temp) \ + do { \ + ASM_MOV_REG_IMM((emit)->as, (reg_temp), (imm)); \ + emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \ + } while (false) STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) { DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope); emit->pass = pass; - emit->stack_start = 0; + emit->do_viper_types = scope->emit_options == MP_EMIT_OPT_VIPER; emit->stack_size = 0; + emit->const_table_cur_obj = 0; + emit->const_table_cur_raw_code = 0; + #if MICROPY_PERSISTENT_CODE_SAVE + emit->qstr_link_cur = 0; + #endif emit->last_emit_was_return_value = false; emit->scope = scope; @@ -213,17 +323,6 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop emit->local_vtype_alloc = scope->num_locals; } - // allocate memory for keeping track of the objects on the stack - // XXX don't know stack size on entry, and it should be maximum over all scopes - // XXX this is such a big hack and really needs to be fixed - if (emit->stack_info == NULL) { - emit->stack_info_alloc = scope->stack_size + 200; - emit->stack_info = 
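// emit_native_mov_reg_qstr() and emit_native_mov_reg_qstr_obj() above record
// one mp_qstr_link_entry_t per qstr baked into the machine code: the low two
// bits of .off select the patch kind (1 = 16-bit immediate, 2 = a full word
// holding MP_OBJ_NEW_QSTR), and the remaining bits are the byte offset of the
// value within the code. A sketch of how a loader could replay this table
// over freshly loaded code (illustrative names, not the actual loader):
//
//     #include <stdint.h>
//     #include <string.h>
//     #include "py/emitglue.h"
//     #include "py/obj.h"
//
//     static void example_link_qstrs(uint8_t *code,
//         const mp_qstr_link_entry_t *link, size_t n) {
//         for (size_t i = 0; i < n; ++i) {
//             size_t off = link[i].off >> 2; // byte offset into code
//             if ((link[i].off & 3) == 1) {
//                 uint16_t v = (uint16_t)link[i].qst;
//                 memcpy(code + off, &v, sizeof(v)); // 16-bit fix-up
//             } else {
//                 uintptr_t w = (uintptr_t)MP_OBJ_NEW_QSTR(link[i].qst);
//                 memcpy(code + off, &w, sizeof(w)); // full-word fix-up
//             }
//         }
//     }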
m_new(stack_info_t, emit->stack_info_alloc); - } - - // set default type for return - emit->return_vtype = VTYPE_PYOBJ; - // set default type for arguments mp_uint_t num_args = emit->scope->num_pos_args + emit->scope->num_kwonly_args; if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) { @@ -236,6 +335,17 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop emit->local_vtype[i] = VTYPE_PYOBJ; } + // Set viper type for arguments + if (emit->do_viper_types) { + for (int i = 0; i < emit->scope->id_info_len; ++i) { + id_info_t *id = &emit->scope->id_info[i]; + if (id->flags & ID_FLAG_IS_PARAM) { + assert(id->local_num < emit->local_vtype_alloc); + emit->local_vtype[id->local_num] = id->flags >> ID_FLAG_VIPER_TYPE_POS; + } + } + } + // local variables begin unbound, and have unknown type for (mp_uint_t i = num_args; i < emit->local_vtype_alloc; i++) { emit->local_vtype[i] = VTYPE_UNBOUND; @@ -251,111 +361,171 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop // generate code for entry to function + // Work out start of code state (mp_code_state_t or reduced version for viper) + emit->code_state_start = 0; + if (NEED_GLOBAL_EXC_HANDLER(emit)) { + emit->code_state_start = sizeof(nlr_buf_t) / sizeof(uintptr_t); + } + if (emit->do_viper_types) { - - // right now we have a restriction of maximum of 4 arguments - if (scope->num_pos_args >= 5) { - EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "Viper functions don't currently support more than 4 arguments"); - return; - } - - // entry to function - int num_locals = 0; - if (pass > MP_PASS_SCOPE) { - num_locals = scope->num_locals - REG_LOCAL_NUM; - if (num_locals < 0) { - num_locals = 0; + // Work out size of state (locals plus stack) + // n_state counts all stack and locals, even those in registers + emit->n_state = scope->num_locals + scope->stack_size; + int num_locals_in_regs = 0; + if (CAN_USE_REGS_FOR_LOCALS(emit)) { + num_locals_in_regs = scope->num_locals; + if (num_locals_in_regs > REG_LOCAL_NUM) { + num_locals_in_regs = REG_LOCAL_NUM; + } + // Need a spot for REG_LOCAL_3 if 4 or more args (see below) + if (scope->num_pos_args >= 4) { + --num_locals_in_regs; } - emit->stack_start = num_locals; - num_locals += scope->stack_size; } - ASM_ENTRY(emit->as, num_locals); - // TODO don't load r7 if we don't need it - #if N_THUMB - asm_thumb_mov_reg_i32(emit->as, ASM_THUMB_REG_R7, (mp_uint_t)mp_fun_table); - #elif N_ARM - asm_arm_mov_reg_i32(emit->as, ASM_ARM_REG_R7, (mp_uint_t)mp_fun_table); - #endif + // Work out where the locals and Python stack start within the C stack + if (NEED_GLOBAL_EXC_HANDLER(emit)) { + // Reserve 2 words for function object and old globals + emit->stack_start = emit->code_state_start + 2; + } else if (scope->scope_flags & MP_SCOPE_FLAG_HASCONSTS) { + // Reserve 1 word for function object, to access const table + emit->stack_start = emit->code_state_start + 1; + } else { + emit->stack_start = emit->code_state_start + 0; + } + + // Entry to function + ASM_ENTRY(emit->as, emit->stack_start + emit->n_state - num_locals_in_regs); #if N_X86 - for (int i = 0; i < scope->num_pos_args; i++) { - if (i == 0) { - asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_1); - } else if (i == 1) { - asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_2); - } else if (i == 2) { - asm_x86_mov_arg_to_r32(emit->as, i, REG_LOCAL_3); - } else { - asm_x86_mov_arg_to_r32(emit->as, i, REG_TEMP0); - asm_x86_mov_r32_to_local(emit->as, REG_TEMP0, i - REG_LOCAL_NUM); - } - } - #else - for (int i = 0; i < 
scope->num_pos_args; i++) { - if (i == 0) { - ASM_MOV_REG_REG(emit->as, REG_LOCAL_1, REG_ARG_1); - } else if (i == 1) { - ASM_MOV_REG_REG(emit->as, REG_LOCAL_2, REG_ARG_2); - } else if (i == 2) { - ASM_MOV_REG_REG(emit->as, REG_LOCAL_3, REG_ARG_3); - } else { - assert(i == 3); // should be true; max 4 args is checked above - ASM_MOV_LOCAL_REG(emit->as, i - REG_LOCAL_NUM, REG_ARG_4); - } - } + asm_x86_mov_arg_to_r32(emit->as, 0, REG_ARG_1); #endif + // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_ARG_1, offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_LOCAL_3, 0); + + // Store function object (passed as first arg) to stack if needed + if (NEED_FUN_OBJ(emit)) { + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_ARG_1); + } + + // Put n_args in REG_ARG_1, n_kw in REG_ARG_2, args array in REG_LOCAL_3 + #if N_X86 + asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_1); + asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_2); + asm_x86_mov_arg_to_r32(emit->as, 3, REG_LOCAL_3); + #else + ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_ARG_2); + ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_ARG_3); + ASM_MOV_REG_REG(emit->as, REG_LOCAL_3, REG_ARG_4); + #endif + + // Check number of args matches this function, and call mp_arg_check_num_sig if not + ASM_JUMP_IF_REG_NONZERO(emit->as, REG_ARG_2, *emit->label_slot + 4, true); + ASM_MOV_REG_IMM(emit->as, REG_ARG_3, scope->num_pos_args); + ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_3, *emit->label_slot + 5); + mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 4); + ASM_MOV_REG_IMM(emit->as, REG_ARG_3, MP_OBJ_FUN_MAKE_SIG(scope->num_pos_args, scope->num_pos_args, false)); + ASM_CALL_IND(emit->as, MP_F_ARG_CHECK_NUM_SIG); + mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 5); + + // Store arguments into locals (reg or stack), converting to native if needed + for (int i = 0; i < emit->scope->num_pos_args; i++) { + int r = REG_ARG_1; + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_LOCAL_3, i); + if (emit->local_vtype[i] != VTYPE_PYOBJ) { + emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, emit->local_vtype[i], REG_ARG_2); + r = REG_RET; + } + // REG_LOCAL_3 points to the args array so be sure not to overwrite it if it's still needed + if (i < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit) && (i != 2 || emit->scope->num_pos_args == 3)) { + ASM_MOV_REG_REG(emit->as, reg_local_table[i], r); + } else { + emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, i), r); + } + } + // Get 3rd local from the stack back into REG_LOCAL_3 if this reg couldn't be written to above + if (emit->scope->num_pos_args >= 4 && CAN_USE_REGS_FOR_LOCALS(emit)) { + ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_3, LOCAL_IDX_LOCAL_VAR(emit, 2)); + } + + emit_native_global_exc_entry(emit); + } else { // work out size of state (locals plus stack) emit->n_state = scope->num_locals + scope->stack_size; - // allocate space on C-stack for code_state structure, which includes state - ASM_ENTRY(emit->as, STATE_START + emit->n_state); + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + emit->code_state_start = 0; + emit->stack_start = sizeof(mp_code_state_t) / sizeof(mp_uint_t); + mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->prelude_offset); + mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->start_offset); + ASM_ENTRY(emit->as, sizeof(nlr_buf_t) / sizeof(uintptr_t)); - // TODO don't 
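// The argument check emitted above packs the function's arity into a single
// word for MP_F_ARG_CHECK_NUM_SIG. A sketch of the packing, mirroring the
// decode on the runtime side (the real macro is MP_OBJ_FUN_MAKE_SIG; this
// version is purely for illustration):
//
//     #include <stdbool.h>
//     #include <stdint.h>
//
//     static inline uint32_t example_make_sig(uint32_t n_args_min,
//         uint32_t n_args_max, bool takes_kw) {
//         return n_args_min << 17 | (n_args_max & 0xffff) << 1 | (takes_kw ? 1 : 0);
//     }
//
// so example_make_sig(2, 2, false) means "exactly two positional arguments,
// no keywords", which is what the viper entry above asserts about itself.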
load r7 if we don't need it - #if N_THUMB - asm_thumb_mov_reg_i32(emit->as, ASM_THUMB_REG_R7, (mp_uint_t)mp_fun_table); - #elif N_ARM - asm_arm_mov_reg_i32(emit->as, ASM_ARM_REG_R7, (mp_uint_t)mp_fun_table); - #endif + // Put address of code_state into REG_GENERATOR_STATE + #if N_X86 + asm_x86_mov_arg_to_r32(emit->as, 0, REG_GENERATOR_STATE); + #else + ASM_MOV_REG_REG(emit->as, REG_GENERATOR_STATE, REG_ARG_1); + #endif - // prepare incoming arguments for call to mp_setup_code_state + // Put throw value into LOCAL_IDX_EXC_VAL slot, for yield/yield-from + #if N_X86 + asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_2); + #endif + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_ARG_2); - #if N_X86 - asm_x86_mov_arg_to_r32(emit->as, 0, REG_ARG_1); - asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_2); - asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_3); - asm_x86_mov_arg_to_r32(emit->as, 3, REG_ARG_4); - #endif + // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, LOCAL_IDX_FUN_OBJ(emit)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_TEMP0, emit->scope->num_pos_args + emit->scope->num_kwonly_args); + } else { + // The locals and stack start after the code_state structure + emit->stack_start = emit->code_state_start + sizeof(mp_code_state_t) / sizeof(mp_uint_t); - // set code_state.fun_bc - ASM_MOV_LOCAL_REG(emit->as, offsetof(mp_code_state_t, fun_bc) / sizeof(uintptr_t), REG_ARG_1); + // Allocate space on C-stack for code_state structure, which includes state + ASM_ENTRY(emit->as, emit->stack_start + emit->n_state); - // set code_state.ip (offset from start of this function to prelude info) - // XXX this encoding may change size - ASM_MOV_LOCAL_IMM_VIA(emit->as, offsetof(mp_code_state_t, ip) / sizeof(uintptr_t), emit->prelude_offset, REG_ARG_1); + // Prepare incoming arguments for call to mp_setup_code_state - // put address of code_state into first arg - ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0); + #if N_X86 + asm_x86_mov_arg_to_r32(emit->as, 0, REG_ARG_1); + asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_2); + asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_3); + asm_x86_mov_arg_to_r32(emit->as, 3, REG_ARG_4); + #endif - // call mp_setup_code_state to prepare code_state structure - #if N_THUMB - asm_thumb_bl_ind(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE, ASM_THUMB_REG_R4); - #elif N_ARM - asm_arm_bl_ind(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE, ASM_ARM_REG_R4); - #else - ASM_CALL_IND(emit->as, mp_fun_table[MP_F_SETUP_CODE_STATE], MP_F_SETUP_CODE_STATE); - #endif + // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_ARG_1, offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_LOCAL_3, emit->scope->num_pos_args + emit->scope->num_kwonly_args); - // cache some locals in registers - if (scope->num_locals > 0) { - ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_1, STATE_START + emit->n_state - 1 - 0); - if (scope->num_locals > 1) { - ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_2, STATE_START + emit->n_state - 1 - 1); - if (scope->num_locals > 2) { - ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_3, STATE_START + emit->n_state - 1 - 2); - } + // Set code_state.fun_bc + ASM_MOV_LOCAL_REG(emit->as, 
LOCAL_IDX_FUN_OBJ(emit), REG_ARG_1); + + // Set code_state.ip (offset from start of this function to prelude info) + // TODO this encoding may change size in the final pass, need to make it fixed + emit_native_mov_state_imm_via(emit, emit->code_state_start + offsetof(mp_code_state_t, ip) / sizeof(uintptr_t), emit->prelude_offset, REG_ARG_1); + + // Put address of code_state into first arg + ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, emit->code_state_start); + + // Call mp_setup_code_state to prepare code_state structure + #if N_THUMB + asm_thumb_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_THUMB_REG_R4); + #elif N_ARM + asm_arm_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_ARM_REG_R4); + #else + ASM_CALL_IND(emit->as, MP_F_SETUP_CODE_STATE); + #endif + } + + emit_native_global_exc_entry(emit); + + // cache some locals in registers, but only if no exception handlers + if (CAN_USE_REGS_FOR_LOCALS(emit)) { + for (int i = 0; i < REG_LOCAL_NUM && i < scope->num_locals; ++i) { + ASM_MOV_REG_LOCAL(emit->as, reg_local_table[i], LOCAL_IDX_LOCAL_VAR(emit, i)); } } @@ -366,14 +536,28 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop emit->local_vtype[id->local_num] = VTYPE_PYOBJ; } } + + if (pass == MP_PASS_EMIT) { + // write argument names as qstr objects + // see comment in corresponding part of emitbc.c about the logic here + for (int i = 0; i < scope->num_pos_args + scope->num_kwonly_args; i++) { + qstr qst = MP_QSTR__star_; + for (int j = 0; j < scope->id_info_len; ++j) { + id_info_t *id = &scope->id_info[j]; + if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) { + qst = id->qst; + break; + } + } + emit->const_table[i] = (mp_uint_t)MP_OBJ_NEW_QSTR(qst); + } + } } } STATIC void emit_native_end_pass(emit_t *emit) { - if (!emit->last_emit_was_return_value) { - ASM_EXIT(emit->as); - } + emit_native_global_exc_exit(emit); if (!emit->do_viper_types) { emit->prelude_offset = mp_asm_base_get_code_pos(&emit->as->base); @@ -405,46 +589,50 @@ STATIC void emit_native_end_pass(emit_t *emit) { } } mp_asm_base_data(&emit->as->base, 1, 255); // end of list sentinel - - mp_asm_base_align(&emit->as->base, ASM_WORD_SIZE); - emit->const_table_offset = mp_asm_base_get_code_pos(&emit->as->base); - - // write argument names as qstr objects - // see comment in corresponding part of emitbc.c about the logic here - for (int i = 0; i < emit->scope->num_pos_args + emit->scope->num_kwonly_args; i++) { - qstr qst = MP_QSTR__star_; - for (int j = 0; j < emit->scope->id_info_len; ++j) { - id_info_t *id = &emit->scope->id_info[j]; - if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) { - qst = id->qst; - break; - } - } - mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); - } - } ASM_END_PASS(emit->as); // check stack is back to zero size assert(emit->stack_size == 0); + assert(emit->exc_stack_size == 0); + + // Deal with const table accounting + assert(emit->pass <= MP_PASS_STACK_SIZE || (emit->const_table_num_obj == emit->const_table_cur_obj)); + emit->const_table_num_obj = emit->const_table_cur_obj; + if (emit->pass == MP_PASS_CODE_SIZE) { + size_t const_table_alloc = 1 + emit->const_table_num_obj + emit->const_table_cur_raw_code; + size_t nqstr = 0; + if (!emit->do_viper_types) { + // Add room for qstr names of arguments + nqstr = emit->scope->num_pos_args + emit->scope->num_kwonly_args; + const_table_alloc += nqstr; + } + emit->const_table = m_new(mp_uint_t, const_table_alloc); + // Store mp_fun_table pointer just after qstrs + 
emit->const_table[nqstr] = (mp_uint_t)(uintptr_t)mp_fun_table; + + #if MICROPY_PERSISTENT_CODE_SAVE + size_t qstr_link_alloc = emit->qstr_link_cur; + if (qstr_link_alloc > 0) { + emit->qstr_link = m_new(mp_qstr_link_entry_t, qstr_link_alloc); + } + #endif + } if (emit->pass == MP_PASS_EMIT) { void *f = mp_asm_base_get_code(&emit->as->base); mp_uint_t f_len = mp_asm_base_get_code_size(&emit->as->base); - // compute type signature - // note that the lower 4 bits of a vtype are tho correct MP_NATIVE_TYPE_xxx - mp_uint_t type_sig = emit->return_vtype & 0xf; - for (mp_uint_t i = 0; i < emit->scope->num_pos_args; i++) { - type_sig |= (emit->local_vtype[i] & 0xf) << (i * 4 + 4); - } - mp_emit_glue_assign_native(emit->scope->raw_code, emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY, - f, f_len, (mp_uint_t*)((byte*)f + emit->const_table_offset), - emit->scope->num_pos_args, emit->scope->scope_flags, type_sig); + f, f_len, emit->const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + emit->prelude_offset, + emit->const_table_cur_obj, emit->const_table_cur_raw_code, + emit->qstr_link_cur, emit->qstr_link, + #endif + emit->scope->num_pos_args, emit->scope->scope_flags, 0); } } @@ -452,8 +640,17 @@ STATIC bool emit_native_last_emit_was_return_value(emit_t *emit) { return emit->last_emit_was_return_value; } +STATIC void ensure_extra_stack(emit_t *emit, size_t delta) { + if (emit->stack_size + delta > emit->stack_info_alloc) { + size_t new_alloc = (emit->stack_size + delta + 8) & ~3; + emit->stack_info = m_renew(stack_info_t, emit->stack_info, emit->stack_info_alloc, new_alloc); + emit->stack_info_alloc = new_alloc; + } +} + STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) { assert((mp_int_t)emit->stack_size + stack_size_delta >= 0); + assert((mp_int_t)emit->stack_size + stack_size_delta <= (mp_int_t)emit->stack_info_alloc); emit->stack_size += stack_size_delta; if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) { emit->scope->stack_size = emit->stack_size; @@ -470,6 +667,9 @@ STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) { STATIC void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) { DEBUG_printf("adjust_stack_size(" INT_FMT ")\n", delta); + if (delta > 0) { + ensure_extra_stack(emit, delta); + } // If we are adjusting the stack in a positive direction (pushing) then we // need to fill in values for the stack kind and vtype of the newly-pushed // entries. 
These should be set to "value" (ie not reg or imm) because we @@ -506,7 +706,12 @@ STATIC stack_info_t *peek_stack(emit_t *emit, mp_uint_t depth) { // depth==0 is top, depth==1 is before top, etc STATIC vtype_kind_t peek_vtype(emit_t *emit, mp_uint_t depth) { - return peek_stack(emit, depth)->vtype; + if (emit->do_viper_types) { + return peek_stack(emit, depth)->vtype; + } else { + // Type is always PYOBJ even if the intermediate stored value is not + return VTYPE_PYOBJ; + } } // pos=1 is TOS, pos=2 is next, etc @@ -518,7 +723,7 @@ STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) { stack_info_t *si = &emit->stack_info[i]; if (si->kind == STACK_REG && si->data.u_reg == reg_needed) { si->kind = STACK_VALUE; - ASM_MOV_LOCAL_REG(emit->as, emit->stack_start + i, si->data.u_reg); + emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg); } } } @@ -529,11 +734,31 @@ STATIC void need_reg_all(emit_t *emit) { stack_info_t *si = &emit->stack_info[i]; if (si->kind == STACK_REG) { si->kind = STACK_VALUE; - ASM_MOV_LOCAL_REG(emit->as, emit->stack_start + i, si->data.u_reg); + emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg); } } } +STATIC vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_info_t *si, bool convert_to_pyobj) { + if (!convert_to_pyobj && emit->do_viper_types) { + ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm); + return si->vtype; + } else { + if (si->vtype == VTYPE_PYOBJ) { + ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm); + } else if (si->vtype == VTYPE_BOOL) { + emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_FALSE_OBJ + si->data.u_imm); + } else if (si->vtype == VTYPE_INT || si->vtype == VTYPE_UINT) { + ASM_MOV_REG_IMM(emit->as, reg_dest, (uintptr_t)MP_OBJ_NEW_SMALL_INT(si->data.u_imm)); + } else if (si->vtype == VTYPE_PTR_NONE) { + emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_NONE_OBJ); + } else { + mp_raise_NotImplementedError("conversion to object"); + } + return VTYPE_PYOBJ; + } +} + STATIC void need_stack_settled(emit_t *emit) { DEBUG_printf(" need_stack_settled; stack_size=%d\n", emit->stack_size); for (int i = 0; i < emit->stack_size; i++) { @@ -541,7 +766,7 @@ STATIC void need_stack_settled(emit_t *emit) { if (si->kind == STACK_REG) { DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i); si->kind = STACK_VALUE; - ASM_MOV_LOCAL_REG(emit->as, emit->stack_start + i, si->data.u_reg); + emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg); } } for (int i = 0; i < emit->stack_size; i++) { @@ -549,7 +774,8 @@ STATIC void need_stack_settled(emit_t *emit) { if (si->kind == STACK_IMM) { DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i); si->kind = STACK_VALUE; - ASM_MOV_LOCAL_IMM_VIA(emit->as, emit->stack_start + i, si->data.u_imm, REG_TEMP0); + si->vtype = load_reg_stack_imm(emit, REG_TEMP0, si, false); + emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP0); } } } @@ -561,7 +787,7 @@ STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int re *vtype = si->vtype; switch (si->kind) { case STACK_VALUE: - ASM_MOV_REG_LOCAL(emit->as, reg_dest, emit->stack_start + emit->stack_size - pos); + emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - pos); break; case STACK_REG: @@ -571,7 +797,7 @@ STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int re break; case STACK_IMM: - ASM_MOV_REG_IMM(emit->as, reg_dest, 
si->data.u_imm); + *vtype = load_reg_stack_imm(emit, reg_dest, si, false); break; } } @@ -583,7 +809,7 @@ STATIC void emit_fold_stack_top(emit_t *emit, int reg_dest) { si[0] = si[1]; if (si->kind == STACK_VALUE) { // if folded element was on the stack we need to put it in a register - ASM_MOV_REG_LOCAL(emit->as, reg_dest, emit->stack_start + emit->stack_size - 1); + emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - 1); si->kind = STACK_REG; si->data.u_reg = reg_dest; } @@ -637,6 +863,7 @@ STATIC void emit_post_top_set_vtype(emit_t *emit, vtype_kind_t new_vtype) { } STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) { + ensure_extra_stack(emit, 1); stack_info_t *si = &emit->stack_info[emit->stack_size]; si->vtype = vtype; si->kind = STACK_REG; @@ -645,6 +872,7 @@ STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) { } STATIC void emit_post_push_imm(emit_t *emit, vtype_kind_t vtype, mp_int_t imm) { + ensure_extra_stack(emit, 1); stack_info_t *si = &emit->stack_info[emit->stack_size]; si->vtype = vtype; si->kind = STACK_IMM; @@ -672,36 +900,26 @@ STATIC void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, in STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) { need_reg_all(emit); - ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind); + ASM_CALL_IND(emit->as, fun_kind); } STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) { need_reg_all(emit); ASM_MOV_REG_IMM(emit->as, arg_reg, arg_val); - ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind); -} - -// the first arg is stored in the code aligned on a mp_uint_t boundary -STATIC void emit_call_with_imm_arg_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) { - need_reg_all(emit); - ASM_MOV_REG_ALIGNED_IMM(emit->as, arg_reg, arg_val); - ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind); + ASM_CALL_IND(emit->as, fun_kind); } STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) { need_reg_all(emit); ASM_MOV_REG_IMM(emit->as, arg_reg1, arg_val1); ASM_MOV_REG_IMM(emit->as, arg_reg2, arg_val2); - ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind); + ASM_CALL_IND(emit->as, fun_kind); } -// the first arg is stored in the code aligned on a mp_uint_t boundary -STATIC void emit_call_with_3_imm_args_and_first_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2, mp_int_t arg_val3, int arg_reg3) { +STATIC void emit_call_with_qstr_arg(emit_t *emit, mp_fun_kind_t fun_kind, qstr qst, int arg_reg) { need_reg_all(emit); - ASM_MOV_REG_ALIGNED_IMM(emit->as, arg_reg1, arg_val1); - ASM_MOV_REG_IMM(emit->as, arg_reg2, arg_val2); - ASM_MOV_REG_IMM(emit->as, arg_reg3, arg_val3); - ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind); + emit_native_mov_reg_qstr(emit, arg_reg, qst); + ASM_CALL_IND(emit->as, fun_kind); } // vtype of all n_pop objects is VTYPE_PYOBJ @@ -718,27 +936,8 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de // must convert them to VTYPE_PYOBJ for viper code if (si->kind == STACK_IMM) { si->kind = STACK_VALUE; - switch (si->vtype) { - case VTYPE_PYOBJ: - ASM_MOV_LOCAL_IMM_VIA(emit->as, emit->stack_start + emit->stack_size - 1 - i, si->data.u_imm, reg_dest); - break; - case VTYPE_BOOL: - if (si->data.u_imm == 0) { - ASM_MOV_LOCAL_IMM_VIA(emit->as, emit->stack_start + 
emit->stack_size - 1 - i, (mp_uint_t)mp_const_false, reg_dest); - } else { - ASM_MOV_LOCAL_IMM_VIA(emit->as, emit->stack_start + emit->stack_size - 1 - i, (mp_uint_t)mp_const_true, reg_dest); - } - si->vtype = VTYPE_PYOBJ; - break; - case VTYPE_INT: - case VTYPE_UINT: - ASM_MOV_LOCAL_IMM_VIA(emit->as, emit->stack_start + emit->stack_size - 1 - i, (uintptr_t)MP_OBJ_NEW_SMALL_INT(si->data.u_imm), reg_dest); - si->vtype = VTYPE_PYOBJ; - break; - default: - // not handled - mp_raise_NotImplementedError("conversion to object"); - } + si->vtype = load_reg_stack_imm(emit, reg_dest, si, true); + emit_native_mov_state_reg(emit, emit->stack_start + emit->stack_size - 1 - i, reg_dest); } // verify that this value is on the stack @@ -750,9 +949,9 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i]; if (si->vtype != VTYPE_PYOBJ) { mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i; - ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, local_num); + emit_native_mov_reg_state(emit, REG_ARG_1, local_num); emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type - ASM_MOV_LOCAL_REG(emit->as, local_num, REG_RET); + emit_native_mov_state_reg(emit, local_num, REG_RET); si->vtype = VTYPE_PYOBJ; DEBUG_printf(" convert_native_to_obj(local_num=" UINT_FMT ")\n", local_num); } @@ -760,61 +959,270 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest. adjust_stack(emit, -n_pop); - ASM_MOV_REG_LOCAL_ADDR(emit->as, reg_dest, emit->stack_start + emit->stack_size); + emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size); } // vtype of all n_push objects is VTYPE_PYOBJ STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_push) { need_reg_all(emit); + ensure_extra_stack(emit, n_push); for (mp_uint_t i = 0; i < n_push; i++) { emit->stack_info[emit->stack_size + i].kind = STACK_VALUE; emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ; } - ASM_MOV_REG_LOCAL_ADDR(emit->as, reg_dest, emit->stack_start + emit->stack_size); + emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size); adjust_stack(emit, n_push); } +STATIC void emit_native_push_exc_stack(emit_t *emit, uint label, bool is_finally) { + if (emit->exc_stack_size + 1 > emit->exc_stack_alloc) { + size_t new_alloc = emit->exc_stack_alloc + 4; + emit->exc_stack = m_renew(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc, new_alloc); + emit->exc_stack_alloc = new_alloc; + } + + exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size++]; + e->label = label; + e->is_finally = is_finally; + e->unwind_label = UNWIND_LABEL_UNUSED; + e->is_active = true; + + ASM_MOV_REG_PCREL(emit->as, REG_RET, label); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET); +} + +STATIC void emit_native_leave_exc_stack(emit_t *emit, bool start_of_handler) { + assert(emit->exc_stack_size > 0); + + // Get current exception handler and deactivate it + exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1]; + e->is_active = false; + + // Find next innermost active exception handler, to restore as current handler + for (--e; e >= emit->exc_stack && !e->is_active; --e) { + } + + // Update the PC of the new exception handler + if (e < emit->exc_stack) { + // No active handler, clear handler PC to zero + 
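// Note the dispatch scheme at work here: there is a single nlr context per
// function plus one "handler PC" slot in the frame. Entering a protected
// block stores the handler's address in that slot; leaving restores the next
// innermost active handler (or zero). The compile-time exc_stack only tracks
// which handler is current. A plain-C analogue of the runtime state being
// maintained (a sketch, using the GNU C labels-as-values extension purely
// for illustration):
//
//     void *exc_handler_pc = NULL;  // the LOCAL_IDX_EXC_HANDLER_PC slot
//
//     // on entering a try block:
//     exc_handler_pc = &&handler;   // ASM_MOV_REG_PCREL + store, as above
//     // on leaving it (or at the start of the handler itself):
//     exc_handler_pc = NULL;        // or the enclosing handler's address
//
// The function-level nlr catch jumps to exc_handler_pc when it is non-NULL,
// and otherwise re-raises out to the caller.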
if (start_of_handler) { + // Optimisation: PC is already cleared by global exc handler + return; + } + ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET); + } else { + // Found new active handler, get its PC + ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label); + } + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET); +} + +STATIC exc_stack_entry_t *emit_native_pop_exc_stack(emit_t *emit) { + assert(emit->exc_stack_size > 0); + exc_stack_entry_t *e = &emit->exc_stack[--emit->exc_stack_size]; + assert(e->is_active == false); + return e; +} + +STATIC void emit_load_reg_with_ptr(emit_t *emit, int reg, mp_uint_t ptr, size_t table_off) { + if (!emit->do_viper_types) { + // Skip qstr names of arguments + table_off += emit->scope->num_pos_args + emit->scope->num_kwonly_args; + } + if (emit->pass == MP_PASS_EMIT) { + emit->const_table[table_off] = ptr; + } + emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_FUN_OBJ(emit)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t)); + ASM_LOAD_REG_REG_OFFSET(emit->as, reg, REG_TEMP0, table_off); +} + +STATIC void emit_load_reg_with_object(emit_t *emit, int reg, mp_obj_t obj) { + // First entry is for mp_fun_table + size_t table_off = 1 + emit->const_table_cur_obj++; + emit_load_reg_with_ptr(emit, reg, (mp_uint_t)obj, table_off); +} + +STATIC void emit_load_reg_with_raw_code(emit_t *emit, int reg, mp_raw_code_t *rc) { + // First entry is for mp_fun_table, then constant objects + size_t table_off = 1 + emit->const_table_num_obj + emit->const_table_cur_raw_code++; + emit_load_reg_with_ptr(emit, reg, (mp_uint_t)rc, table_off); +} + STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) { DEBUG_printf("label_assign(" UINT_FMT ")\n", l); + + bool is_finally = false; + if (emit->exc_stack_size > 0) { + exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1]; + is_finally = e->is_finally && e->label == l; + } + + if (is_finally) { + // Label is at start of finally handler: store TOS into exception slot + vtype_kind_t vtype; + emit_pre_pop_reg(emit, &vtype, REG_TEMP0); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0); + } + emit_native_pre(emit); // need to commit stack because we can jump here from elsewhere need_stack_settled(emit); mp_asm_base_label_assign(&emit->as->base, l); emit_post(emit); + + if (is_finally) { + // Label is at start of finally handler: pop exception stack + emit_native_leave_exc_stack(emit, false); + } +} + +STATIC void emit_native_global_exc_entry(emit_t *emit) { + // Note: 4 labels are reserved for this function, starting at *emit->label_slot + + emit->exit_label = *emit->label_slot; + + if (NEED_GLOBAL_EXC_HANDLER(emit)) { + mp_uint_t nlr_label = *emit->label_slot + 1; + mp_uint_t start_label = *emit->label_slot + 2; + mp_uint_t global_except_label = *emit->label_slot + 3; + + if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) { + // Set new globals + emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_FUN_OBJ(emit)); + ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_ARG_1, offsetof(mp_obj_fun_bc_t, globals) / sizeof(uintptr_t)); + emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS); + + // Save old globals (or NULL if globals didn't change) + emit_native_mov_state_reg(emit, LOCAL_IDX_OLD_GLOBALS(emit), REG_RET); + } + + if (emit->scope->exc_stack_size == 0) { + if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) { + // Optimisation: if globals didn't change don't push the nlr context + ASM_JUMP_IF_REG_ZERO(emit->as, 
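// Every constant now comes out of the function's const_table (reached via the
// fun_bc object saved in the frame) rather than being embedded in the
// instruction stream. The table layout implied by emit_load_reg_with_ptr()
// above and the end_pass allocation is, as a sketch:
//
//     #include "py/objfun.h"
//
//     // const_table[0 .. nqstr-1]  argument-name qstrs (non-viper only)
//     // const_table[nqstr]         pointer to mp_fun_table
//     // const_table[nqstr+1 ...]   constant objects, then child raw codes
//
//     static mp_uint_t example_fetch_const_obj(const mp_obj_fun_bc_t *fun,
//         size_t nqstr, size_t obj_idx) {
//         return fun->const_table[nqstr + 1 + obj_idx];
//     }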
REG_RET, start_label, false); + } + + // Wrap everything in an nlr context + ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0); + emit_call(emit, MP_F_NLR_PUSH); + ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, start_label, true); + } else { + // Clear the unwind state + ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_TEMP0); + + // Put PC of start code block into REG_LOCAL_1 + ASM_MOV_REG_PCREL(emit->as, REG_LOCAL_1, start_label); + + // Wrap everything in an nlr context + emit_native_label_assign(emit, nlr_label); + ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_2, LOCAL_IDX_EXC_HANDLER_UNWIND(emit)); + ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0); + emit_call(emit, MP_F_NLR_PUSH); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_LOCAL_2); + ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, global_except_label, true); + + // Clear PC of current code block, and jump there to resume execution + ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_TEMP0); + ASM_JUMP_REG(emit->as, REG_LOCAL_1); + + // Global exception handler: check for valid exception handler + emit_native_label_assign(emit, global_except_label); + ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_1, LOCAL_IDX_EXC_HANDLER_PC(emit)); + ASM_JUMP_IF_REG_NONZERO(emit->as, REG_LOCAL_1, nlr_label, false); + } + + if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) { + // Restore old globals + emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit)); + emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS); + } + + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + // Store return value in state[0] + ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit)); + ASM_STORE_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, offsetof(mp_code_state_t, state) / sizeof(uintptr_t)); + + // Load return kind + ASM_MOV_REG_IMM(emit->as, REG_RET, MP_VM_RETURN_EXCEPTION); + + ASM_EXIT(emit->as); + } else { + // Re-raise exception out to caller + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); + emit_call(emit, MP_F_NATIVE_RAISE); + } + + // Label for start of function + emit_native_label_assign(emit, start_label); + + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_GEN_PC(emit)); + ASM_JUMP_REG(emit->as, REG_TEMP0); + emit->start_offset = mp_asm_base_get_code_pos(&emit->as->base); + + // This is the first entry of the generator + + // Check LOCAL_IDX_EXC_VAL for any injected value + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); + emit_call(emit, MP_F_NATIVE_RAISE); + } + } +} + +STATIC void emit_native_global_exc_exit(emit_t *emit) { + // Label for end of function + emit_native_label_assign(emit, emit->exit_label); + + if (NEED_GLOBAL_EXC_HANDLER(emit)) { + // Get old globals + if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) { + emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit)); + + if (emit->scope->exc_stack_size == 0) { + // Optimisation: if globals didn't change then don't restore them and don't do nlr_pop + ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, emit->exit_label + 1, false); + } + + // Restore old globals + emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS); + } + + // Pop the nlr context + emit_call(emit, MP_F_NLR_POP); + + if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) { + if (emit->scope->exc_stack_size == 0) { + // Destination label for above 
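// Taken together, the entry/exit pair amounts to the standard MicroPython
// nlr pattern, written here as ordinary C for orientation (a sketch of the
// shape, not literal emitter output):
//
//     #include "py/nlr.h"
//
//     nlr_buf_t nlr;
//     if (nlr_push(&nlr) == 0) {
//         // ... body of the compiled function ...
//         nlr_pop();
//     } else {
//         // nlr.ret_val holds the raised exception (the LOCAL_IDX_EXC_VAL
//         // slot); jump to the active local handler if there is one,
//         // otherwise re-raise, or for generators store it in state[0]
//         // and return MP_VM_RETURN_EXCEPTION.
//     }
//
// The REG_ZERO/REG_NONZERO branches above exist to skip the nlr_push
// entirely when neither globals nor exception handlers require it.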
optimisation + emit_native_label_assign(emit, emit->exit_label + 1); + } + } + + // Load return value + ASM_MOV_REG_LOCAL(emit->as, REG_RET, LOCAL_IDX_RET_VAL(emit)); + } + + ASM_EXIT(emit->as); } STATIC void emit_native_import_name(emit_t *emit, qstr qst) { DEBUG_printf("import_name %s\n", qstr_str(qst)); // get arguments from stack: arg2 = fromlist, arg3 = level - // if using viper types these arguments must be converted to proper objects - if (emit->do_viper_types) { - // fromlist should be None or a tuple - stack_info_t *top = peek_stack(emit, 0); - if (top->vtype == VTYPE_PTR_NONE) { - emit_pre_pop_discard(emit); - ASM_MOV_REG_IMM(emit->as, REG_ARG_2, (mp_uint_t)mp_const_none); - } else { - vtype_kind_t vtype_fromlist; - emit_pre_pop_reg(emit, &vtype_fromlist, REG_ARG_2); - assert(vtype_fromlist == VTYPE_PYOBJ); - } + // If using viper types these arguments must be converted to proper objects, and + // to accomplish this viper types are turned off for the emit_pre_pop_reg_reg call. + bool orig_do_viper_types = emit->do_viper_types; + emit->do_viper_types = false; + vtype_kind_t vtype_fromlist; + vtype_kind_t vtype_level; + emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); + assert(vtype_fromlist == VTYPE_PYOBJ); + assert(vtype_level == VTYPE_PYOBJ); + emit->do_viper_types = orig_do_viper_types; - // level argument should be an immediate integer - top = peek_stack(emit, 0); - assert(top->vtype == VTYPE_INT && top->kind == STACK_IMM); - ASM_MOV_REG_IMM(emit->as, REG_ARG_3, (mp_uint_t)MP_OBJ_NEW_SMALL_INT(top->data.u_imm)); - emit_pre_pop_discard(emit); - - } else { - vtype_kind_t vtype_fromlist; - vtype_kind_t vtype_level; - emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3); - assert(vtype_fromlist == VTYPE_PYOBJ); - assert(vtype_level == VTYPE_PYOBJ); - } - - emit_call_with_imm_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name + emit_call_with_qstr_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -824,7 +1232,7 @@ STATIC void emit_native_import_from(emit_t *emit, qstr qst) { vtype_kind_t vtype_module; emit_access_stack(emit, 1, &vtype_module, REG_ARG_1); // arg1 = module assert(vtype_module == VTYPE_PYOBJ); - emit_call_with_imm_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name + emit_call_with_qstr_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -837,42 +1245,38 @@ STATIC void emit_native_import_star(emit_t *emit) { emit_post(emit); } +STATIC void emit_native_import(emit_t *emit, qstr qst, int kind) { + if (kind == MP_EMIT_IMPORT_NAME) { + emit_native_import_name(emit, qst); + } else if (kind == MP_EMIT_IMPORT_FROM) { + emit_native_import_from(emit, qst); + } else { + emit_native_import_star(emit); + } +} + STATIC void emit_native_load_const_tok(emit_t *emit, mp_token_kind_t tok) { DEBUG_printf("load_const_tok(tok=%u)\n", tok); - emit_native_pre(emit); - vtype_kind_t vtype; - mp_uint_t val; - if (emit->do_viper_types) { - switch (tok) { - case MP_TOKEN_KW_NONE: vtype = VTYPE_PTR_NONE; val = 0; break; - case MP_TOKEN_KW_FALSE: vtype = VTYPE_BOOL; val = 0; break; - case MP_TOKEN_KW_TRUE: vtype = VTYPE_BOOL; val = 1; break; - default: - assert(tok == MP_TOKEN_ELLIPSIS); - vtype = VTYPE_PYOBJ; val = (mp_uint_t)&mp_const_ellipsis_obj; break; - } + if (tok == MP_TOKEN_ELLIPSIS) { + #if MICROPY_PERSISTENT_CODE_SAVE + emit_native_load_const_obj(emit, 
MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj)); + #else + emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj)); + #endif } else { - vtype = VTYPE_PYOBJ; - switch (tok) { - case MP_TOKEN_KW_NONE: val = (mp_uint_t)mp_const_none; break; - case MP_TOKEN_KW_FALSE: val = (mp_uint_t)mp_const_false; break; - case MP_TOKEN_KW_TRUE: val = (mp_uint_t)mp_const_true; break; - default: - assert(tok == MP_TOKEN_ELLIPSIS); - val = (mp_uint_t)&mp_const_ellipsis_obj; break; + emit_native_pre(emit); + if (tok == MP_TOKEN_KW_NONE) { + emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); + } else { + emit_post_push_imm(emit, VTYPE_BOOL, tok == MP_TOKEN_KW_FALSE ? 0 : 1); } } - emit_post_push_imm(emit, vtype, val); } STATIC void emit_native_load_const_small_int(emit_t *emit, mp_int_t arg) { DEBUG_printf("load_const_small_int(int=" INT_FMT ")\n", arg); emit_native_pre(emit); - if (emit->do_viper_types) { - emit_post_push_imm(emit, VTYPE_INT, arg); - } else { - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_NEW_SMALL_INT(arg)); - } + emit_post_push_imm(emit, VTYPE_INT, arg); } STATIC void emit_native_load_const_str(emit_t *emit, qstr qst) { @@ -886,14 +1290,17 @@ STATIC void emit_native_load_const_str(emit_t *emit, qstr qst) { } else */ { - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_NEW_QSTR(qst)); + need_reg_single(emit, REG_TEMP0, 0); + emit_native_mov_reg_qstr_obj(emit, REG_TEMP0, qst); + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0); } } STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj) { + emit->scope->scope_flags |= MP_SCOPE_FLAG_HASCONSTS; emit_native_pre(emit); need_reg_single(emit, REG_RET, 0); - ASM_MOV_REG_ALIGNED_IMM(emit->as, REG_RET, (mp_uint_t)obj); + emit_load_reg_with_object(emit, REG_RET, obj); emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -909,19 +1316,11 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "local '%q' used before type known", qst); } emit_native_pre(emit); - if (local_num == 0) { - emit_post_push_reg(emit, vtype, REG_LOCAL_1); - } else if (local_num == 1) { - emit_post_push_reg(emit, vtype, REG_LOCAL_2); - } else if (local_num == 2) { - emit_post_push_reg(emit, vtype, REG_LOCAL_3); + if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) { + emit_post_push_reg(emit, vtype, reg_local_table[local_num]); } else { need_reg_single(emit, REG_TEMP0, 0); - if (emit->do_viper_types) { - ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM); - } else { - ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, STATE_START + emit->n_state - 1 - local_num); - } + emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_LOCAL_VAR(emit, local_num)); emit_post_push_reg(emit, vtype, REG_TEMP0); } } @@ -938,33 +1337,33 @@ STATIC void emit_native_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } -STATIC void emit_native_load_name(emit_t *emit, qstr qst) { - DEBUG_printf("load_name(%s)\n", qstr_str(qst)); - emit_native_pre(emit); - emit_call_with_imm_arg(emit, MP_F_LOAD_NAME, qst, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); +STATIC void emit_native_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + if (kind == MP_EMIT_IDOP_LOCAL_FAST) { + emit_native_load_fast(emit, qst, local_num); + } else { + emit_native_load_deref(emit, qst, local_num); + } } -STATIC void emit_native_load_global(emit_t *emit, qstr qst) { - DEBUG_printf("load_global(%s)\n", qstr_str(qst)); +STATIC 
void emit_native_load_global(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_LOAD_NAME); + MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_LOAD_GLOBAL); emit_native_pre(emit); - // check for builtin casting operators - if (emit->do_viper_types && qst == MP_QSTR_int) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_INT); - } else if (emit->do_viper_types && qst == MP_QSTR_uint) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_UINT); - } else if (emit->do_viper_types && qst == MP_QSTR_ptr) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR); - } else if (emit->do_viper_types && qst == MP_QSTR_ptr8) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR8); - } else if (emit->do_viper_types && qst == MP_QSTR_ptr16) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR16); - } else if (emit->do_viper_types && qst == MP_QSTR_ptr32) { - emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, VTYPE_PTR32); + if (kind == MP_EMIT_IDOP_GLOBAL_NAME) { + DEBUG_printf("load_name(%s)\n", qstr_str(qst)); } else { - emit_call_with_imm_arg(emit, MP_F_LOAD_GLOBAL, qst, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); + DEBUG_printf("load_global(%s)\n", qstr_str(qst)); + if (emit->do_viper_types) { + // check for builtin casting operators + int native_type = mp_native_type_from_qstr(qst); + if (native_type >= MP_NATIVE_TYPE_BOOL) { + emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, native_type); + return; + } + } } + emit_call_with_qstr_arg(emit, MP_F_LOAD_NAME + kind, qst, REG_ARG_1); + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } STATIC void emit_native_load_attr(emit_t *emit, qstr qst) { @@ -975,7 +1374,7 @@ STATIC void emit_native_load_attr(emit_t *emit, qstr qst) { vtype_kind_t vtype_base; emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base assert(vtype_base == VTYPE_PYOBJ); - emit_call_with_imm_arg(emit, MP_F_LOAD_ATTR, qst, REG_ARG_2); // arg2 = attribute name + emit_call_with_qstr_arg(emit, MP_F_LOAD_ATTR, qst, REG_ARG_2); // arg2 = attribute name emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -983,13 +1382,13 @@ STATIC void emit_native_load_method(emit_t *emit, qstr qst, bool is_super) { if (is_super) { emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, 3); // arg2 = dest ptr emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_2, 2); // arg2 = dest ptr - emit_call_with_imm_arg(emit, MP_F_LOAD_SUPER_METHOD, qst, REG_ARG_1); // arg1 = method name + emit_call_with_qstr_arg(emit, MP_F_LOAD_SUPER_METHOD, qst, REG_ARG_1); // arg1 = method name } else { vtype_kind_t vtype_base; emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base assert(vtype_base == VTYPE_PYOBJ); emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr - emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, qst, REG_ARG_2); // arg2 = method name + emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, qst, REG_ARG_2); // arg2 = method name } } @@ -1136,19 +1535,11 @@ STATIC void emit_native_load_subscr(emit_t *emit) { STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { vtype_kind_t vtype; - if (local_num == 0) { - emit_pre_pop_reg(emit, &vtype, REG_LOCAL_1); - } else if (local_num == 1) { - emit_pre_pop_reg(emit, &vtype, REG_LOCAL_2); - } else if (local_num == 2) { - emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3); + if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) { + emit_pre_pop_reg(emit, &vtype, reg_local_table[local_num]); } else { 
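// Fast locals 0..REG_LOCAL_NUM-1 stay pinned in reg_local_table[] whenever
// CAN_USE_REGS_FOR_LOCALS() holds; everything else round-trips through the
// state area. The guard exists because an nlr_jump restores callee-saved
// registers to their values at nlr_push time, losing any assignment made
// inside a try block -- the same hazard setjmp/longjmp has with register
// variables, as a sketch:
//
//     #include <setjmp.h>
//
//     static jmp_buf env;
//
//     static int example(void) {
//         register int x = 1;      // may live in a callee-saved register
//         if (setjmp(env) == 0) {
//             x = 2;
//             longjmp(env, 1);     // restores registers...
//         }
//         return x;                // ...so x may legitimately read as 1
//     }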
emit_pre_pop_reg(emit, &vtype, REG_TEMP0); - if (emit->do_viper_types) { - ASM_MOV_LOCAL_REG(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0); - } else { - ASM_MOV_LOCAL_REG(emit->as, STATE_START + emit->n_state - 1 - local_num, REG_TEMP0); - } + emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, local_num), REG_TEMP0); } emit_post(emit); @@ -1178,25 +1569,33 @@ STATIC void emit_native_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) emit_post(emit); } -STATIC void emit_native_store_name(emit_t *emit, qstr qst) { - // mp_store_name, but needs conversion of object (maybe have mp_viper_store_name(obj, type)) - vtype_kind_t vtype; - emit_pre_pop_reg(emit, &vtype, REG_ARG_2); - assert(vtype == VTYPE_PYOBJ); - emit_call_with_imm_arg(emit, MP_F_STORE_NAME, qst, REG_ARG_1); // arg1 = name - emit_post(emit); +STATIC void emit_native_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + if (kind == MP_EMIT_IDOP_LOCAL_FAST) { + emit_native_store_fast(emit, qst, local_num); + } else { + emit_native_store_deref(emit, qst, local_num); + } } -STATIC void emit_native_store_global(emit_t *emit, qstr qst) { - vtype_kind_t vtype = peek_vtype(emit, 0); - if (vtype == VTYPE_PYOBJ) { +STATIC void emit_native_store_global(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_STORE_NAME); + MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_STORE_GLOBAL); + if (kind == MP_EMIT_IDOP_GLOBAL_NAME) { + // mp_store_name, but needs conversion of object (maybe have mp_viper_store_name(obj, type)) + vtype_kind_t vtype; emit_pre_pop_reg(emit, &vtype, REG_ARG_2); + assert(vtype == VTYPE_PYOBJ); } else { - emit_pre_pop_reg(emit, &vtype, REG_ARG_1); - emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type - ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET); + vtype_kind_t vtype = peek_vtype(emit, 0); + if (vtype == VTYPE_PYOBJ) { + emit_pre_pop_reg(emit, &vtype, REG_ARG_2); + } else { + emit_pre_pop_reg(emit, &vtype, REG_ARG_1); + emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type + ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET); + } } - emit_call_with_imm_arg(emit, MP_F_STORE_GLOBAL, qst, REG_ARG_1); // arg1 = name + emit_call_with_qstr_arg(emit, MP_F_STORE_NAME + kind, qst, REG_ARG_1); // arg1 = name emit_post(emit); } @@ -1205,7 +1604,7 @@ STATIC void emit_native_store_attr(emit_t *emit, qstr qst) { emit_pre_pop_reg_reg(emit, &vtype_base, REG_ARG_1, &vtype_val, REG_ARG_3); // arg1 = base, arg3 = value assert(vtype_base == VTYPE_PYOBJ); assert(vtype_val == VTYPE_PYOBJ); - emit_call_with_imm_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name + emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name emit_post(emit); } @@ -1288,10 +1687,6 @@ STATIC void emit_native_store_subscr(emit_t *emit) { } #endif ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1); - #if N_ARM - asm_arm_strh_reg_reg_reg(emit->as, reg_value, reg_base, reg_index); - return; - #endif ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base reg_base = reg_index; } @@ -1308,11 +1703,12 @@ STATIC void emit_native_store_subscr(emit_t *emit) { break; } #endif - ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2); #if N_ARM + ASM_MOV_REG_IMM(emit->as, reg_index, index_value); asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index); return; #endif + ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2); 
ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base reg_base = reg_index; } @@ -1389,30 +1785,23 @@ STATIC void emit_native_store_subscr(emit_t *emit) { } } -STATIC void emit_native_delete_fast(emit_t *emit, qstr qst, mp_uint_t local_num) { - // TODO: This is not compliant implementation. We could use MP_OBJ_SENTINEL - // to mark deleted vars but then every var would need to be checked on - // each access. Very inefficient, so just set value to None to enable GC. - emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE); - emit_native_store_fast(emit, qst, local_num); +STATIC void emit_native_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { + if (kind == MP_EMIT_IDOP_LOCAL_FAST) { + // TODO: This is not compliant implementation. We could use MP_OBJ_SENTINEL + // to mark deleted vars but then every var would need to be checked on + // each access. Very inefficient, so just set value to None to enable GC. + emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE); + emit_native_store_fast(emit, qst, local_num); + } else { + // TODO implement me! + } } -STATIC void emit_native_delete_deref(emit_t *emit, qstr qst, mp_uint_t local_num) { - // TODO implement me! - (void)emit; - (void)qst; - (void)local_num; -} - -STATIC void emit_native_delete_name(emit_t *emit, qstr qst) { +STATIC void emit_native_delete_global(emit_t *emit, qstr qst, int kind) { + MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_DELETE_NAME); + MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_DELETE_GLOBAL); emit_native_pre(emit); - emit_call_with_imm_arg(emit, MP_F_DELETE_NAME, qst, REG_ARG_1); - emit_post(emit); -} - -STATIC void emit_native_delete_global(emit_t *emit, qstr qst) { - emit_native_pre(emit); - emit_call_with_imm_arg(emit, MP_F_DELETE_GLOBAL, qst, REG_ARG_1); + emit_call_with_qstr_arg(emit, MP_F_DELETE_NAME + kind, qst, REG_ARG_1); emit_post(emit); } @@ -1420,7 +1809,8 @@ STATIC void emit_native_delete_attr(emit_t *emit, qstr qst) { vtype_kind_t vtype_base; emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base assert(vtype_base == VTYPE_PYOBJ); - emit_call_with_2_imm_args(emit, MP_F_STORE_ATTR, qst, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); // arg2 = attribute name, arg3 = value (null for delete) + ASM_XOR_REG_REG(emit->as, REG_ARG_3, REG_ARG_3); // arg3 = value (null for delete) + emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name emit_post(emit); } @@ -1432,6 +1822,26 @@ STATIC void emit_native_delete_subscr(emit_t *emit) { emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); } +STATIC void emit_native_subscr(emit_t *emit, int kind) { + if (kind == MP_EMIT_SUBSCR_LOAD) { + emit_native_load_subscr(emit); + } else if (kind == MP_EMIT_SUBSCR_STORE) { + emit_native_store_subscr(emit); + } else { + emit_native_delete_subscr(emit); + } +} + +STATIC void emit_native_attr(emit_t *emit, qstr qst, int kind) { + if (kind == MP_EMIT_ATTR_LOAD) { + emit_native_load_attr(emit, qst); + } else if (kind == MP_EMIT_ATTR_STORE) { + emit_native_store_attr(emit, qst); + } else { + emit_native_delete_attr(emit, qst); + } +} + STATIC void emit_native_dup_top(emit_t *emit) { DEBUG_printf("dup_top\n"); vtype_kind_t vtype; @@ -1475,7 +1885,7 @@ STATIC void emit_native_jump(emit_t *emit, mp_uint_t label) { emit_post(emit); } -STATIC void emit_native_jump_helper(emit_t *emit, bool pop) { +STATIC void emit_native_jump_helper(emit_t *emit, bool cond, mp_uint_t label, bool pop) { 
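+ // Shared tail for pop_jump_if and jump_if_or_pop: coerce the top-of-stack value to a truth value in REG_RET, then emit a conditional branch to 'label'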
vtype_kind_t vtype = peek_vtype(emit, 0); if (vtype == VTYPE_PYOBJ) { emit_pre_pop_reg(emit, &vtype, REG_ARG_1); @@ -1500,39 +1910,69 @@ STATIC void emit_native_jump_helper(emit_t *emit, bool pop) { } // need to commit stack because we may jump elsewhere need_stack_settled(emit); + // Emit the jump + if (cond) { + ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ); + } else { + ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ); + } + if (!pop) { + adjust_stack(emit, -1); + } + emit_post(emit); } STATIC void emit_native_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) { DEBUG_printf("pop_jump_if(cond=%u, label=" UINT_FMT ")\n", cond, label); - emit_native_jump_helper(emit, true); - if (cond) { - ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label); - } else { - ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label); - } - emit_post(emit); + emit_native_jump_helper(emit, cond, label, true); } STATIC void emit_native_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) { DEBUG_printf("jump_if_or_pop(cond=%u, label=" UINT_FMT ")\n", cond, label); - emit_native_jump_helper(emit, false); - if (cond) { - ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label); - } else { - ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label); + emit_native_jump_helper(emit, cond, label, false); +} + +STATIC void emit_native_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) { + if (except_depth > 0) { + exc_stack_entry_t *first_finally = NULL; + exc_stack_entry_t *prev_finally = NULL; + exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1]; + for (; except_depth > 0; --except_depth, --e) { + if (e->is_finally && e->is_active) { + // Found an active finally handler + if (first_finally == NULL) { + first_finally = e; + } + if (prev_finally != NULL) { + // Mark prev finally as needed to unwind a jump + prev_finally->unwind_label = e->label; + } + prev_finally = e; + } + } + if (prev_finally == NULL) { + // No finally, handle the jump ourselves + // First, restore the exception handler address for the jump + if (e < emit->exc_stack) { + ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET); + } else { + ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label); + } + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET); + } else { + // Last finally should do our jump for us + // Mark finally as needing to decide the type of jump + prev_finally->unwind_label = UNWIND_LABEL_DO_FINAL_UNWIND; + ASM_MOV_REG_PCREL(emit->as, REG_RET, label & ~MP_EMIT_BREAK_FROM_FOR); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_RET); + // Cancel any active exception (see also emit_native_pop_except_jump) + emit_native_mov_reg_const(emit, REG_RET, MP_F_CONST_NONE_OBJ); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_RET); + // Jump to the innermost active finally + label = first_finally->label; + } } - adjust_stack(emit, -1); - emit_post(emit); -} - -STATIC void emit_native_break_loop(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) { - (void)except_depth; - emit_native_jump(emit, label & ~MP_EMIT_BREAK_FROM_FOR); // TODO properly -} - -STATIC void emit_native_continue_loop(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) { - (void)except_depth; - emit_native_jump(emit, label); // TODO properly + emit_native_jump(emit, label & ~MP_EMIT_BREAK_FROM_FOR); } STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) { @@ -1544,7 +1984,7 @@ STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) { emit_access_stack(emit, 1, &vtype, 
REG_ARG_1); // arg1 = ctx_mgr assert(vtype == VTYPE_PYOBJ); emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr - emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___exit__, REG_ARG_2); + emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___exit__, REG_ARG_2); // stack: (..., ctx_mgr, __exit__, self) emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // self @@ -1557,7 +1997,7 @@ STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) { // get __enter__ method emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr - emit_call_with_imm_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___enter__, REG_ARG_2); // arg2 = method name + emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___enter__, REG_ARG_2); // arg2 = method name // stack: (..., __exit__, self, __enter__, self) // call __enter__ method @@ -1568,96 +2008,90 @@ STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) { // need to commit stack because we may jump elsewhere need_stack_settled(emit); - emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf - emit_call(emit, MP_F_NLR_PUSH); - ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label); + emit_native_push_exc_stack(emit, label, true); - emit_access_stack(emit, sizeof(nlr_buf_t) / sizeof(mp_uint_t) + 1, &vtype, REG_RET); // access return value of __enter__ - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // push return value of __enter__ - // stack: (..., __exit__, self, as_value, nlr_buf, as_value) + emit_native_dup_top(emit); + // stack: (..., __exit__, self, as_value, as_value) +} + +STATIC void emit_native_setup_block(emit_t *emit, mp_uint_t label, int kind) { + if (kind == MP_EMIT_SETUP_BLOCK_WITH) { + emit_native_setup_with(emit, label); + } else { + // Set up except and finally + emit_native_pre(emit); + need_stack_settled(emit); + emit_native_push_exc_stack(emit, label, kind == MP_EMIT_SETUP_BLOCK_FINALLY); + emit_post(emit); + } } STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) { - // note: label+1 is available as an auxiliary label + // Note: 3 labels are reserved for this function, starting at *emit->label_slot - // stack: (..., __exit__, self, as_value, nlr_buf) + // stack: (..., __exit__, self, as_value) emit_native_pre(emit); - emit_call(emit, MP_F_NLR_POP); - adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)) - 1); + emit_native_leave_exc_stack(emit, false); + adjust_stack(emit, -1); // stack: (..., __exit__, self) + // Label for case where __exit__ is called from an unwind jump + emit_native_label_assign(emit, *emit->label_slot + 2); + // call __exit__ - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); + emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); + emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); + emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5); emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2); - // jump to after with cleanup nlr_catch block - adjust_stack(emit, 1); // dummy nlr_buf.prev - emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE); // nlr_buf.ret_val = no exception - emit_native_jump(emit, label + 1); + // Replace exc with None and finish + emit_native_jump(emit, *emit->label_slot); // nlr_catch - emit_native_label_assign(emit, label); + // Don't use 
emit_native_label_assign because this isn't a real finally label + mp_asm_base_label_assign(&emit->as->base, label); - // adjust stack counter for: __exit__, self, as_value - adjust_stack(emit, 3); - // stack: (..., __exit__, self, as_value, nlr_buf.prev, nlr_buf.ret_val) + // Leave with's exception handler + emit_native_leave_exc_stack(emit, true); - vtype_kind_t vtype; - emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // get the thrown value (exc) - adjust_stack(emit, -2); // discard nlr_buf.prev and as_value + // Adjust stack counter for: __exit__, self (implicitly discard as_value which is above self) + emit_native_adjust_stack_size(emit, 2); // stack: (..., __exit__, self) - // REG_ARG_1=exc - emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // self - emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // __exit__ - adjust_stack(emit, 1); // dummy nlr_buf.prev - emit_post_push_reg(emit, vtype, REG_ARG_1); // push exc to save it for later - emit_post_push_reg(emit, vtype, REG_ARG_3); // __exit__ - emit_post_push_reg(emit, vtype, REG_ARG_2); // self - // stack: (..., exc, __exit__, self) - // REG_ARG_1=exc + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); // get exc + + // Check if exc is None and jump to non-exc handler if it is + emit_native_mov_reg_const(emit, REG_ARG_2, MP_F_CONST_NONE_OBJ); + ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_2, *emit->label_slot + 2); ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc) emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc) emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_1); // push exc value - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); // traceback info - // stack: (..., exc, __exit__, self, type(exc), exc, traceback) + emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); // traceback info + // Stack: (..., __exit__, self, type(exc), exc, traceback) // call __exit__ method emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5); emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2); - // stack: (..., exc) + // Stack: (...) 
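+ // REG_RET holds the object returned by __exit__; it is reduced to a C truth value below via mp_obj_is_true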
- // if REG_RET is true then we need to replace top-of-stack with None (swallow exception) + // If REG_RET is true then we need to replace exception with None (swallow exception) if (REG_ARG_1 != REG_RET) { ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_RET); } emit_call(emit, MP_F_OBJ_IS_TRUE); - ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label + 1); + ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot + 1, true); - // replace exc with None - emit_pre_pop_discard(emit); - emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)mp_const_none); + // Replace exception with None + emit_native_label_assign(emit, *emit->label_slot); + emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0); // end of with cleanup nlr_catch block - emit_native_label_assign(emit, label + 1); -} + emit_native_label_assign(emit, *emit->label_slot + 1); -STATIC void emit_native_setup_except(emit_t *emit, mp_uint_t label) { - emit_native_pre(emit); - // need to commit stack because we may jump elsewhere - need_stack_settled(emit); - emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf - emit_call(emit, MP_F_NLR_PUSH); - ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label); - emit_post(emit); -} - -STATIC void emit_native_setup_finally(emit_t *emit, mp_uint_t label) { - emit_native_setup_except(emit, label); + // Exception is in nlr_buf.ret_val slot } STATIC void emit_native_end_finally(emit_t *emit) { @@ -1666,10 +2100,23 @@ STATIC void emit_native_end_finally(emit_t *emit) { // if exc == None: pass // else: raise exc // the check if exc is None is done in the MP_F_NATIVE_RAISE stub - vtype_kind_t vtype; - emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // get nlr_buf.ret_val - emit_pre_pop_discard(emit); // discard nlr_buf.prev + emit_native_pre(emit); + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); emit_call(emit, MP_F_NATIVE_RAISE); + + // Get state for this finally and see if we need to unwind + exc_stack_entry_t *e = emit_native_pop_exc_stack(emit); + if (e->unwind_label != UNWIND_LABEL_UNUSED) { + ASM_MOV_REG_LOCAL(emit->as, REG_RET, LOCAL_IDX_EXC_HANDLER_UNWIND(emit)); + ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot, false); + if (e->unwind_label == UNWIND_LABEL_DO_FINAL_UNWIND) { + ASM_JUMP_REG(emit->as, REG_RET); + } else { + emit_native_jump(emit, e->unwind_label); + } + emit_native_label_assign(emit, *emit->label_slot); + } + emit_post(emit); } @@ -1696,8 +2143,13 @@ STATIC void emit_native_for_iter(emit_t *emit, mp_uint_t label) { emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, MP_OBJ_ITER_BUF_NSLOTS); adjust_stack(emit, MP_OBJ_ITER_BUF_NSLOTS); emit_call(emit, MP_F_NATIVE_ITERNEXT); + #if MICROPY_DEBUG_MP_OBJ_SENTINELS ASM_MOV_REG_IMM(emit->as, REG_TEMP1, (mp_uint_t)MP_OBJ_STOP_ITERATION); ASM_JUMP_IF_REG_EQ(emit->as, REG_RET, REG_TEMP1, label); + #else + MP_STATIC_ASSERT(MP_OBJ_STOP_ITERATION == 0); + ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, false); + #endif emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -1708,15 +2160,15 @@ STATIC void emit_native_for_iter_end(emit_t *emit) { emit_post(emit); } -STATIC void emit_native_pop_block(emit_t *emit) { - emit_native_pre(emit); - emit_call(emit, MP_F_NLR_POP); - adjust_stack(emit, -(mp_int_t)(sizeof(nlr_buf_t) / sizeof(mp_uint_t)) + 1); - emit_post(emit); -} - -STATIC void emit_native_pop_except(emit_t *emit) { - (void)emit; +STATIC void emit_native_pop_except_jump(emit_t *emit, 
mp_uint_t label, bool within_exc_handler) { + if (within_exc_handler) { + // Cancel any active exception so subsequent handlers don't see it + emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0); + } else { + emit_native_leave_exc_stack(emit, false); + } + emit_native_jump(emit, label); } STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) { @@ -1775,17 +2227,16 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) { int reg_rhs = REG_ARG_3; emit_pre_pop_reg_flexible(emit, &vtype_rhs, &reg_rhs, REG_RET, REG_ARG_2); emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2); - if (0) { - // dummy #if !(N_X64 || N_X86) - } else if (op == MP_BINARY_OP_LSHIFT) { + if (op == MP_BINARY_OP_LSHIFT) { ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs); emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2); } else if (op == MP_BINARY_OP_RSHIFT) { ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs); emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2); + } else #endif - } else if (op == MP_BINARY_OP_OR) { + if (op == MP_BINARY_OP_OR) { ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs); emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2); } else if (op == MP_BINARY_OP_XOR) { @@ -1910,26 +2361,29 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) { } } -STATIC void emit_native_build_tuple(emit_t *emit, mp_uint_t n_args) { +#if MICROPY_PY_BUILTINS_SLICE +STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args); +#endif + +STATIC void emit_native_build(emit_t *emit, mp_uint_t n_args, int kind) { // for viper: call runtime, with types of args // if wrapped in byte_array, or something, allocates memory and fills it + MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_F_BUILD_TUPLE); + MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_F_BUILD_LIST); + MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_F_BUILD_MAP); + MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_F_BUILD_SET); + #if MICROPY_PY_BUILTINS_SLICE + if (kind == MP_EMIT_BUILD_SLICE) { + emit_native_build_slice(emit, n_args); + return; + } + #endif emit_native_pre(emit); - emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items - emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE, n_args, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple -} - -STATIC void emit_native_build_list(emit_t *emit, mp_uint_t n_args) { - emit_native_pre(emit); - emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items - emit_call_with_imm_arg(emit, MP_F_BUILD_LIST, n_args, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new list -} - -STATIC void emit_native_build_map(emit_t *emit, mp_uint_t n_args) { - emit_native_pre(emit); - emit_call_with_imm_arg(emit, MP_F_BUILD_MAP, n_args, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new map + if (kind == MP_EMIT_BUILD_TUPLE || kind == MP_EMIT_BUILD_LIST || kind == MP_EMIT_BUILD_SET) { + emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items + } + emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE + kind, n_args, REG_ARG_1); + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple/list/map/set } STATIC void emit_native_store_map(emit_t *emit) { @@ -1942,15 +2396,6 @@ STATIC void emit_native_store_map(emit_t *emit) { emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // map } -#if MICROPY_PY_BUILTINS_SET -STATIC void emit_native_build_set(emit_t *emit, mp_uint_t
n_args) { - emit_native_pre(emit); - emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items - emit_call_with_imm_arg(emit, MP_F_BUILD_SET, n_args, REG_ARG_1); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new set -} -#endif - #if MICROPY_PY_BUILTINS_SLICE STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) { DEBUG_printf("build_slice %d\n", n_args); @@ -1959,8 +2404,7 @@ STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) { emit_pre_pop_reg_reg(emit, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop assert(vtype_start == VTYPE_PYOBJ); assert(vtype_stop == VTYPE_PYOBJ); - emit_call_with_imm_arg(emit, MP_F_NEW_SLICE, (mp_uint_t)mp_const_none, REG_ARG_3); // arg3 = step - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); + emit_native_mov_reg_const(emit, REG_ARG_3, MP_F_CONST_NONE_OBJ); // arg3 = step } else { assert(n_args == 3); vtype_kind_t vtype_start, vtype_stop, vtype_step; @@ -1968,9 +2412,9 @@ STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) { assert(vtype_start == VTYPE_PYOBJ); assert(vtype_stop == VTYPE_PYOBJ); assert(vtype_step == VTYPE_PYOBJ); - emit_call(emit, MP_F_NEW_SLICE); - emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } + emit_call(emit, MP_F_NEW_SLICE); + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } #endif @@ -2025,14 +2469,18 @@ STATIC void emit_native_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_ // call runtime, with type info for args, or don't support dict/default params, or only support Python objects for them emit_native_pre(emit); if (n_pos_defaults == 0 && n_kw_defaults == 0) { - emit_call_with_3_imm_args_and_first_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1, (mp_uint_t)MP_OBJ_NULL, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3); + need_reg_all(emit); + ASM_MOV_REG_IMM(emit->as, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL); + ASM_MOV_REG_IMM(emit->as, REG_ARG_3, (mp_uint_t)MP_OBJ_NULL); } else { vtype_kind_t vtype_def_tuple, vtype_def_dict; emit_pre_pop_reg_reg(emit, &vtype_def_dict, REG_ARG_3, &vtype_def_tuple, REG_ARG_2); assert(vtype_def_tuple == VTYPE_PYOBJ); assert(vtype_def_dict == VTYPE_PYOBJ); - emit_call_with_imm_arg_aligned(emit, MP_F_MAKE_FUNCTION_FROM_RAW_CODE, (mp_uint_t)scope->raw_code, REG_ARG_1); + need_reg_all(emit); } + emit_load_reg_with_raw_code(emit, REG_ARG_1, scope->raw_code); + ASM_CALL_IND(emit->as, MP_F_MAKE_FUNCTION_FROM_RAW_CODE); emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -2045,8 +2493,8 @@ STATIC void emit_native_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_c emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over + 2); ASM_MOV_REG_IMM(emit->as, REG_ARG_2, 0x100 | n_closed_over); } - ASM_MOV_REG_ALIGNED_IMM(emit->as, REG_ARG_1, (mp_uint_t)scope->raw_code); - ASM_CALL_IND(emit->as, mp_fun_table[MP_F_MAKE_CLOSURE_FROM_RAW_CODE], MP_F_MAKE_CLOSURE_FROM_RAW_CODE); + emit_load_reg_with_raw_code(emit, REG_ARG_1, scope->raw_code); + ASM_CALL_IND(emit->as, MP_F_MAKE_CLOSURE_FROM_RAW_CODE); emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); } @@ -2120,33 +2568,58 @@ STATIC void emit_native_call_method(emit_t *emit, mp_uint_t n_positional, mp_uin STATIC void emit_native_return_value(emit_t *emit) { DEBUG_printf("return_value\n"); + + if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) { + // Save pointer to current stack position for caller to access return value + emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1); + 
emit_native_mov_state_reg(emit, offsetof(mp_code_state_t, sp) / sizeof(uintptr_t), REG_TEMP0); + + // Put return type in return value slot + ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_NORMAL); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_TEMP0); + + // Do the unwinding jump to get to the return handler + emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size); + emit->last_emit_was_return_value = true; + return; + } + if (emit->do_viper_types) { + vtype_kind_t return_vtype = emit->scope->scope_flags >> MP_SCOPE_FLAG_VIPERRET_POS; if (peek_vtype(emit, 0) == VTYPE_PTR_NONE) { emit_pre_pop_discard(emit); - if (emit->return_vtype == VTYPE_PYOBJ) { - ASM_MOV_REG_IMM(emit->as, REG_RET, (mp_uint_t)mp_const_none); + if (return_vtype == VTYPE_PYOBJ) { + emit_native_mov_reg_const(emit, REG_RET, MP_F_CONST_NONE_OBJ); } else { - ASM_MOV_REG_IMM(emit->as, REG_RET, 0); + ASM_MOV_REG_IMM(emit->as, REG_ARG_1, 0); } } else { vtype_kind_t vtype; - emit_pre_pop_reg(emit, &vtype, REG_RET); - if (vtype != emit->return_vtype) { + emit_pre_pop_reg(emit, &vtype, return_vtype == VTYPE_PYOBJ ? REG_RET : REG_ARG_1); + if (vtype != return_vtype) { EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "return expected '%q' but got '%q'", - vtype_to_qstr(emit->return_vtype), vtype_to_qstr(vtype)); + vtype_to_qstr(return_vtype), vtype_to_qstr(vtype)); } } + if (return_vtype != VTYPE_PYOBJ) { + emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, return_vtype, REG_ARG_2); + } } else { vtype_kind_t vtype; emit_pre_pop_reg(emit, &vtype, REG_RET); assert(vtype == VTYPE_PYOBJ); } + if (NEED_GLOBAL_EXC_HANDLER(emit)) { + // Save return value for the global exception handler to use + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_RET); + } + emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size); emit->last_emit_was_return_value = true; - ASM_EXIT(emit->as); } STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) { + (void)n_args; assert(n_args == 1); vtype_kind_t vtype_exc; emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise @@ -2157,34 +2630,107 @@ STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) { emit_call(emit, MP_F_NATIVE_RAISE); } -STATIC void emit_native_yield_value(emit_t *emit) { - // not supported (for now) - (void)emit; - mp_raise_NotImplementedError("native yield"); -} -STATIC void emit_native_yield_from(emit_t *emit) { - // not supported (for now) - (void)emit; - mp_raise_NotImplementedError("native yield from"); +STATIC void emit_native_yield(emit_t *emit, int kind) { + // Note: 1 (yield) or 3 (yield from) labels are reserved for this function, starting at *emit->label_slot + + if (emit->do_viper_types) { + mp_raise_NotImplementedError("native yield"); + } + emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR; + + need_stack_settled(emit); + + if (kind == MP_EMIT_YIELD_FROM) { + + // Top of yield-from loop, conceptually implementing: + // for item in generator: + // yield item + + // Jump to start of loop + emit_native_jump(emit, *emit->label_slot + 2); + + // Label for top of loop + emit_native_label_assign(emit, *emit->label_slot + 1); + } + + // Save pointer to current stack position for caller to access yielded value + emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1); + emit_native_mov_state_reg(emit, offsetof(mp_code_state_t, sp) / sizeof(uintptr_t), REG_TEMP0); + + // Put return type in return value slot + ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_YIELD); + ASM_MOV_LOCAL_REG(emit->as, 
LOCAL_IDX_RET_VAL(emit), REG_TEMP0); + + // Save re-entry PC + ASM_MOV_REG_PCREL(emit->as, REG_TEMP0, *emit->label_slot); + emit_native_mov_state_reg(emit, LOCAL_IDX_GEN_PC(emit), REG_TEMP0); + + // Jump to exit handler + ASM_JUMP(emit->as, emit->exit_label); + + // Label re-entry point + mp_asm_base_label_assign(&emit->as->base, *emit->label_slot); + + // Re-open any active exception handler + if (emit->exc_stack_size > 0) { + // Find innermost active exception handler, to restore as current handler + exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1]; + for (; e >= emit->exc_stack; --e) { + if (e->is_active) { + // Found active handler, get its PC + ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label); + ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET); + break; + } + } + } + + emit_native_adjust_stack_size(emit, 1); // send_value + + if (kind == MP_EMIT_YIELD_VALUE) { + // Check LOCAL_IDX_EXC_VAL for any injected value + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); + emit_call(emit, MP_F_NATIVE_RAISE); + } else { + // Label loop entry + emit_native_label_assign(emit, *emit->label_slot + 2); + + // Get the next item from the delegate generator + vtype_kind_t vtype; + emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // send_value + emit_access_stack(emit, 1, &vtype, REG_ARG_1); // generator + ASM_MOV_REG_LOCAL(emit->as, REG_ARG_3, LOCAL_IDX_EXC_VAL(emit)); // throw_value + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_3); + emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 1); // ret_value + emit_call(emit, MP_F_NATIVE_YIELD_FROM); + + // If returned non-zero then generator continues + ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, *emit->label_slot + 1, true); + + // Pop exhausted gen, replace with ret_value + emit_native_adjust_stack_size(emit, 1); // ret_value + emit_fold_stack_top(emit, REG_ARG_1); + } } STATIC void emit_native_start_except_handler(emit_t *emit) { - // This instruction follows an nlr_pop, so the stack counter is back to zero, when really - // it should be up by a whole nlr_buf_t. We then want to pop the nlr_buf_t here, but save - // the first 2 elements, so we can get the thrown value.
- adjust_stack(emit, 1); - vtype_kind_t vtype_nlr; - emit_pre_pop_reg(emit, &vtype_nlr, REG_ARG_1); // get the thrown value - emit_pre_pop_discard(emit); // discard the linked-list pointer in the nlr_buf - emit_post_push_reg_reg_reg(emit, VTYPE_PYOBJ, REG_ARG_1, VTYPE_PYOBJ, REG_ARG_1, VTYPE_PYOBJ, REG_ARG_1); // push the 3 exception items + // Protected block has finished so leave the current exception handler + emit_native_leave_exc_stack(emit, true); + + // Get and push nlr_buf.ret_val + ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit)); + emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0); } STATIC void emit_native_end_except_handler(emit_t *emit) { - adjust_stack(emit, -1); + adjust_stack(emit, -1); // pop the exception (end_finally didn't use it) } const emit_method_table_t EXPORT_FUN(method_table) = { - emit_native_set_native_type, + #if MICROPY_DYNAMIC_COMPILER + EXPORT_FUN(new), + EXPORT_FUN(free), + #endif + emit_native_start_pass, emit_native_end_pass, emit_native_last_emit_was_return_value, @@ -2192,41 +2738,29 @@ const emit_method_table_t EXPORT_FUN(method_table) = { emit_native_set_source_line, { - emit_native_load_fast, - emit_native_load_deref, - emit_native_load_name, + emit_native_load_local, emit_native_load_global, }, { - emit_native_store_fast, - emit_native_store_deref, - emit_native_store_name, + emit_native_store_local, emit_native_store_global, }, { - emit_native_delete_fast, - emit_native_delete_deref, - emit_native_delete_name, + emit_native_delete_local, emit_native_delete_global, }, emit_native_label_assign, - emit_native_import_name, - emit_native_import_from, - emit_native_import_star, + emit_native_import, emit_native_load_const_tok, emit_native_load_const_small_int, emit_native_load_const_str, emit_native_load_const_obj, emit_native_load_null, - emit_native_load_attr, emit_native_load_method, emit_native_load_build_class, - emit_native_load_subscr, - emit_native_store_attr, - emit_native_store_subscr, - emit_native_delete_attr, - emit_native_delete_subscr, + emit_native_subscr, + emit_native_attr, emit_native_dup_top, emit_native_dup_top_two, emit_native_pop_top, @@ -2235,30 +2769,18 @@ const emit_method_table_t EXPORT_FUN(method_table) = { emit_native_jump, emit_native_pop_jump_if, emit_native_jump_if_or_pop, - emit_native_break_loop, - emit_native_continue_loop, - emit_native_setup_with, + emit_native_unwind_jump, + emit_native_setup_block, emit_native_with_cleanup, - emit_native_setup_except, - emit_native_setup_finally, emit_native_end_finally, emit_native_get_iter, emit_native_for_iter, emit_native_for_iter_end, - emit_native_pop_block, - emit_native_pop_except, + emit_native_pop_except_jump, emit_native_unary_op, emit_native_binary_op, - emit_native_build_tuple, - emit_native_build_list, - emit_native_build_map, + emit_native_build, emit_native_store_map, - #if MICROPY_PY_BUILTINS_SET - emit_native_build_set, - #endif - #if MICROPY_PY_BUILTINS_SLICE - emit_native_build_slice, - #endif emit_native_store_comp, emit_native_unpack_sequence, emit_native_unpack_ex, @@ -2268,8 +2790,7 @@ const emit_method_table_t EXPORT_FUN(method_table) = { emit_native_call_method, emit_native_return_value, emit_native_raise_varargs, - emit_native_yield_value, - emit_native_yield_from, + emit_native_yield, emit_native_start_except_handler, emit_native_end_except_handler, diff --git a/python/src/py/emitnthumb.c b/python/src/py/emitnthumb.c index 2b68ca3a1..1c33e7a68 100644 --- a/python/src/py/emitnthumb.c +++ b/python/src/py/emitnthumb.c @@ -8,6 +8,11 @@ #define 
GENERIC_ASM_API (1) #include "py/asmthumb.h" +// Word indices of REG_LOCAL_x in nlr_buf_t +#define NLR_BUF_IDX_LOCAL_1 (3) // r4 +#define NLR_BUF_IDX_LOCAL_2 (4) // r5 +#define NLR_BUF_IDX_LOCAL_3 (5) // r6 + #define N_THUMB (1) #define EXPORT_FUN(name) emit_native_thumb_##name #include "py/emitnative.c" diff --git a/python/src/py/emitnx64.c b/python/src/py/emitnx64.c index b9800f636..4abb3ecad 100644 --- a/python/src/py/emitnx64.c +++ b/python/src/py/emitnx64.c @@ -8,6 +8,11 @@ #define GENERIC_ASM_API (1) #include "py/asmx64.h" +// Word indices of REG_LOCAL_x in nlr_buf_t +#define NLR_BUF_IDX_LOCAL_1 (5) // rbx +#define NLR_BUF_IDX_LOCAL_2 (6) // r12 +#define NLR_BUF_IDX_LOCAL_3 (7) // r13 + #define N_X64 (1) #define EXPORT_FUN(name) emit_native_x64_##name #include "py/emitnative.c" diff --git a/python/src/py/emitnx86.c b/python/src/py/emitnx86.c index 5d2bbb267..7c96c3b82 100644 --- a/python/src/py/emitnx86.c +++ b/python/src/py/emitnx86.c @@ -9,10 +9,16 @@ #define GENERIC_ASM_API (1) #include "py/asmx86.h" +// Word indices of REG_LOCAL_x in nlr_buf_t +#define NLR_BUF_IDX_LOCAL_1 (5) // ebx +#define NLR_BUF_IDX_LOCAL_2 (7) // esi +#define NLR_BUF_IDX_LOCAL_3 (6) // edi + // x86 needs a table to know how many args a given function has STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = { [MP_F_CONVERT_OBJ_TO_NATIVE] = 2, [MP_F_CONVERT_NATIVE_TO_OBJ] = 2, + [MP_F_NATIVE_SWAP_GLOBALS] = 1, [MP_F_LOAD_NAME] = 1, [MP_F_LOAD_GLOBAL] = 1, [MP_F_LOAD_BUILD_CLASS] = 0, @@ -56,9 +62,11 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = { [MP_F_DELETE_GLOBAL] = 1, [MP_F_NEW_CELL] = 1, [MP_F_MAKE_CLOSURE_FROM_RAW_CODE] = 3, - [MP_F_SETUP_CODE_STATE] = 5, + [MP_F_ARG_CHECK_NUM_SIG] = 3, + [MP_F_SETUP_CODE_STATE] = 4, [MP_F_SMALL_INT_FLOOR_DIVIDE] = 2, [MP_F_SMALL_INT_MODULO] = 2, + [MP_F_NATIVE_YIELD_FROM] = 3, }; #define N_X86 (1) diff --git a/python/src/py/emitnxtensa.c b/python/src/py/emitnxtensa.c index 1a423e21e..34089e90d 100644 --- a/python/src/py/emitnxtensa.c +++ b/python/src/py/emitnxtensa.c @@ -8,6 +8,11 @@ #define GENERIC_ASM_API (1) #include "py/asmxtensa.h" +// Word indices of REG_LOCAL_x in nlr_buf_t +#define NLR_BUF_IDX_LOCAL_1 (8) // a12 +#define NLR_BUF_IDX_LOCAL_2 (9) // a13 +#define NLR_BUF_IDX_LOCAL_3 (10) // a14 + #define N_XTENSA (1) #define EXPORT_FUN(name) emit_native_xtensa_##name #include "py/emitnative.c" diff --git a/python/src/py/frozenmod.h b/python/src/py/frozenmod.h index 8cddef681..8a477d028 100644 --- a/python/src/py/frozenmod.h +++ b/python/src/py/frozenmod.h @@ -3,7 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2014 Damien P. George + * Copyright (c) 2015 Paul Sokolovsky + * Copyright (c) 2016 Damien P. George * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal diff --git a/python/src/py/gc.c b/python/src/py/gc.c index 5b5088920..c763a839e 100644 --- a/python/src/py/gc.c +++ b/python/src/py/gc.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -328,7 +329,9 @@ void gc_collect_start(void) { // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals, // dict_globals, then the root pointer section of mp_state_vm. 
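+ // The scan now starts at thread.dict_locals rather than offset 0 of mp_state_ctx, so leading non-root fields (such as the saved C stack top) are no longer scanned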
void **ptrs = (void**)(void*)&mp_state_ctx; - gc_collect_root(ptrs, offsetof(mp_state_ctx_t, vm.qstr_last_chunk) / sizeof(void*)); + size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals); + size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk); + gc_collect_root(ptrs + root_start / sizeof(void*), (root_end - root_start) / sizeof(void*)); #if MICROPY_ENABLE_PYSTACK // Trace root pointers from the Python stack. @@ -360,6 +363,13 @@ void gc_collect_end(void) { GC_EXIT(); } +void gc_sweep_all(void) { + GC_ENTER(); + MP_STATE_MEM(gc_lock_depth)++; + MP_STATE_MEM(gc_stack_overflow) = 0; + gc_collect_end(); +} + void gc_info(gc_info_t *info) { GC_ENTER(); info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start); @@ -424,7 +434,8 @@ void gc_info(gc_info_t *info) { GC_EXIT(); } -void *gc_alloc(size_t n_bytes, bool has_finaliser) { +void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) { + bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER; size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK; DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks); @@ -451,13 +462,15 @@ void *gc_alloc(size_t n_bytes, bool has_finaliser) { if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) { GC_EXIT(); gc_collect(); + collected = 1; GC_ENTER(); } #endif for (;;) { - n_free = 0; // Fixes bug, should be written in the next MicroPython version + // look for a run of n_blocks available blocks + n_free = 0; for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) { byte a = MP_STATE_MEM(gc_alloc_table_start)[i]; if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; } @@ -900,7 +913,8 @@ void gc_dump_alloc_table(void) { GC_EXIT(); } -#if DEBUG_PRINT +#if 0 +// For testing the GC functions void gc_test(void) { mp_uint_t len = 500; mp_uint_t *heap = malloc(len); diff --git a/python/src/py/gc.h b/python/src/py/gc.h index 739349c1f..4690d3937 100644 --- a/python/src/py/gc.h +++ b/python/src/py/gc.h @@ -45,7 +45,14 @@ void gc_collect_start(void); void gc_collect_root(void **ptrs, size_t len); void gc_collect_end(void); -void *gc_alloc(size_t n_bytes, bool has_finaliser); +// Use this function to sweep the whole heap and run all finalisers +void gc_sweep_all(void); + +enum { + GC_ALLOC_FLAG_HAS_FINALISER = 1, +}; + +void *gc_alloc(size_t n_bytes, unsigned int alloc_flags); void gc_free(void *ptr); // does not call finaliser size_t gc_nbytes(const void *ptr); void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move); diff --git a/python/src/py/grammar.h b/python/src/py/grammar.h index 6abb1de8c..5a5b682ac 100644 --- a/python/src/py/grammar.h +++ b/python/src/py/grammar.h @@ -122,8 +122,8 @@ DEF_RULE_NC(augassign, or(12), tok(DEL_PLUS_EQUAL), tok(DEL_MINUS_EQUAL), tok(DE DEF_RULE(del_stmt, c(del_stmt), and(2), tok(KW_DEL), rule(exprlist)) DEF_RULE(pass_stmt, c(generic_all_nodes), and(1), tok(KW_PASS)) DEF_RULE_NC(flow_stmt, or(5), rule(break_stmt), rule(continue_stmt), rule(return_stmt), rule(raise_stmt), rule(yield_stmt)) -DEF_RULE(break_stmt, c(break_stmt), and(1), tok(KW_BREAK)) -DEF_RULE(continue_stmt, c(continue_stmt), and(1), tok(KW_CONTINUE)) +DEF_RULE(break_stmt, c(break_cont_stmt), and(1), tok(KW_BREAK)) +DEF_RULE(continue_stmt, c(break_cont_stmt), and(1), tok(KW_CONTINUE)) DEF_RULE(return_stmt, c(return_stmt), and(2), tok(KW_RETURN), opt_rule(testlist)) 
DEF_RULE(yield_stmt, c(yield_stmt), and(1), rule(yield_expr)) DEF_RULE(raise_stmt, c(raise_stmt), and(2), tok(KW_RAISE), opt_rule(raise_stmt_arg)) @@ -157,8 +157,8 @@ DEF_RULE_NC(as_name, and_ident(2), tok(KW_AS), tok(NAME)) DEF_RULE_NC(import_as_names, list_with_end, rule(import_as_name), tok(DEL_COMMA)) DEF_RULE_NC(dotted_as_names, list, rule(dotted_as_name), tok(DEL_COMMA)) DEF_RULE_NC(dotted_name, list, tok(NAME), tok(DEL_PERIOD)) -DEF_RULE(global_stmt, c(global_stmt), and(2), tok(KW_GLOBAL), rule(name_list)) -DEF_RULE(nonlocal_stmt, c(nonlocal_stmt), and(2), tok(KW_NONLOCAL), rule(name_list)) +DEF_RULE(global_stmt, c(global_nonlocal_stmt), and(2), tok(KW_GLOBAL), rule(name_list)) +DEF_RULE(nonlocal_stmt, c(global_nonlocal_stmt), and(2), tok(KW_NONLOCAL), rule(name_list)) DEF_RULE_NC(name_list, list, tok(NAME), tok(DEL_COMMA)) DEF_RULE(assert_stmt, c(assert_stmt), and(3), tok(KW_ASSERT), rule(test), opt_rule(assert_stmt_extra)) DEF_RULE_NC(assert_stmt_extra, and_ident(2), tok(DEL_COMMA), rule(test)) @@ -231,8 +231,8 @@ DEF_RULE(lambdef_nocond, c(lambdef), and_blank(4), tok(KW_LAMBDA), opt_rule(vara // power: atom_expr ['**' factor] // atom_expr: 'await' atom trailer* | atom trailer* -DEF_RULE(or_test, c(or_test), list, rule(and_test), tok(KW_OR)) -DEF_RULE(and_test, c(and_test), list, rule(not_test), tok(KW_AND)) +DEF_RULE(or_test, c(or_and_test), list, rule(and_test), tok(KW_OR)) +DEF_RULE(and_test, c(or_and_test), list, rule(not_test), tok(KW_AND)) DEF_RULE_NC(not_test, or(2), rule(not_test_2), rule(comparison)) DEF_RULE(not_test_2, c(not_test_2), and(2), tok(KW_NOT), rule(not_test)) DEF_RULE(comparison, c(comparison), list, rule(expr), rule(comp_op)) @@ -241,9 +241,9 @@ DEF_RULE_NC(comp_op_not_in, and(2), tok(KW_NOT), tok(KW_IN)) DEF_RULE_NC(comp_op_is, and(2), tok(KW_IS), opt_rule(comp_op_is_not)) DEF_RULE_NC(comp_op_is_not, and(1), tok(KW_NOT)) DEF_RULE(star_expr, c(star_expr), and(2), tok(OP_STAR), rule(expr)) -DEF_RULE(expr, c(expr), list, rule(xor_expr), tok(OP_PIPE)) -DEF_RULE(xor_expr, c(xor_expr), list, rule(and_expr), tok(OP_CARET)) -DEF_RULE(and_expr, c(and_expr), list, rule(shift_expr), tok(OP_AMPERSAND)) +DEF_RULE(expr, c(binary_op), list, rule(xor_expr), tok(OP_PIPE)) +DEF_RULE(xor_expr, c(binary_op), list, rule(and_expr), tok(OP_CARET)) +DEF_RULE(and_expr, c(binary_op), list, rule(shift_expr), tok(OP_AMPERSAND)) DEF_RULE(shift_expr, c(term), list, rule(arith_expr), rule(shift_op)) DEF_RULE_NC(shift_op, or(2), tok(OP_DBL_LESS), tok(OP_DBL_MORE)) DEF_RULE(arith_expr, c(term), list, rule(term), rule(arith_op)) @@ -290,8 +290,8 @@ DEF_RULE(trailer_period, c(trailer_period), and(2), tok(DEL_PERIOD), tok(NAME)) #if MICROPY_PY_BUILTINS_SLICE DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(subscript), tok(DEL_COMMA)) DEF_RULE_NC(subscript, or(2), rule(subscript_3), rule(subscript_2)) -DEF_RULE(subscript_2, c(subscript_2), and_ident(2), rule(test), opt_rule(subscript_3)) -DEF_RULE(subscript_3, c(subscript_3), and(2), tok(DEL_COLON), opt_rule(subscript_3b)) +DEF_RULE(subscript_2, c(subscript), and_ident(2), rule(test), opt_rule(subscript_3)) +DEF_RULE(subscript_3, c(subscript), and(2), tok(DEL_COLON), opt_rule(subscript_3b)) DEF_RULE_NC(subscript_3b, or(2), rule(subscript_3c), rule(subscript_3d)) DEF_RULE_NC(subscript_3c, and(2), tok(DEL_COLON), opt_rule(test)) DEF_RULE_NC(subscript_3d, and_ident(2), rule(test), opt_rule(sliceop)) diff --git a/python/src/py/lexer.c b/python/src/py/lexer.c index 6017d69d6..e161700b1 100644 --- a/python/src/py/lexer.c +++ 
b/python/src/py/lexer.c @@ -590,6 +590,8 @@ void mp_lexer_to_next(mp_lexer_t *lex) { } vstr_add_char(&lex->vstr, CUR_CHAR(lex)); next_char(lex); + } else if (is_char(lex, '_')) { + next_char(lex); } else { break; } diff --git a/python/src/py/makeqstrdata.py b/python/src/py/makeqstrdata.py index 3c0a60909..060ebb7fd 100644 --- a/python/src/py/makeqstrdata.py +++ b/python/src/py/makeqstrdata.py @@ -51,6 +51,176 @@ codepoint2name[ord('^')] = 'caret' codepoint2name[ord('|')] = 'pipe' codepoint2name[ord('~')] = 'tilde' +# static qstrs, should be sorted + +static_qstr_list = [ + "", + "__dir__", # Put __dir__ after empty qstr for builtin dir() to work + "\n", + " ", + "*", + "/", + "<module>", + "_", + "__call__", + "__class__", + "__delitem__", + "__enter__", + "__exit__", + "__getattr__", + "__getitem__", + "__hash__", + "__init__", + "__int__", + "__iter__", + "__len__", + "__main__", + "__module__", + "__name__", + "__new__", + "__next__", + "__qualname__", + "__repr__", + "__setitem__", + "__str__", + "ArithmeticError", + "AssertionError", + "AttributeError", + "BaseException", + "EOFError", + "Ellipsis", + "Exception", + "GeneratorExit", + "ImportError", + "IndentationError", + "IndexError", + "KeyError", + "KeyboardInterrupt", + "LookupError", + "MemoryError", + "NameError", + "NoneType", + "NotImplementedError", + "OSError", + "OverflowError", + "RuntimeError", + "StopIteration", + "SyntaxError", + "SystemExit", + "TypeError", + "ValueError", + "ZeroDivisionError", + "abs", + "all", + "any", + "append", + "args", + "bool", + "builtins", + "bytearray", + "bytecode", + "bytes", + "callable", + "chr", + "classmethod", + "clear", + "close", + "const", + "copy", + "count", + "dict", + "dir", + "divmod", + "end", + "endswith", + "eval", + "exec", + "extend", + "find", + "format", + "from_bytes", + "get", + "getattr", + "globals", + "hasattr", + "hash", + "id", + "index", + "insert", + "int", + "isalpha", + "isdigit", + "isinstance", + "islower", + "isspace", + "issubclass", + "isupper", + "items", + "iter", + "join", + "key", + "keys", + "len", + "list", + "little", + "locals", + "lower", + "lstrip", + "main", + "map", + "micropython", + "next", + "object", + "open", + "ord", + "pop", + "popitem", + "pow", + "print", + "range", + "read", + "readinto", + "readline", + "remove", + "replace", + "repr", + "reverse", + "rfind", + "rindex", + "round", + "rsplit", + "rstrip", + "self", + "send", + "sep", + "set", + "setattr", + "setdefault", + "sort", + "sorted", + "split", + "start", + "startswith", + "staticmethod", + "step", + "stop", + "str", + "strip", + "sum", + "super", + "throw", + "to_bytes", + "tuple", + "type", + "update", + "upper", + "utf-8", + "value", + "values", + "write", + "zip", +] + # this must match the equivalent function in qstr.c def compute_hash(qstr, bytes_hash): hash = 5381 @@ -70,9 +240,22 @@ def qstr_escape(qst): return re.sub(r'[^A-Za-z0-9_]', esc_char, qst) def parse_input_headers(infiles): - # read the qstrs in from the input files qcfgs = {} qstrs = {} + + # add static qstrs + for qstr in static_qstr_list: + # work out the corresponding qstr name + ident = qstr_escape(qstr) + + # don't add duplicates + assert ident not in qstrs + + # add the qstr to the list, with order number to retain original order in file + order = len(qstrs) - 300000 + qstrs[ident] = (order, ident, qstr) + + # read the qstrs in from the input files for infile in infiles: with open(infile, 'rt') as f: for line in f: diff --git a/python/src/py/malloc.c b/python/src/py/malloc.c index ba5c952f3..f8ed1487a
100644 --- a/python/src/py/malloc.c +++ b/python/src/py/malloc.c @@ -62,6 +62,13 @@ #define realloc(ptr, n) gc_realloc(ptr, n, true) #define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv) #else + +// GC is disabled. Use system malloc/realloc/free. + +#if MICROPY_ENABLE_FINALISER +#error MICROPY_ENABLE_FINALISER requires MICROPY_ENABLE_GC +#endif + STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) { if (allow_move) { return realloc(ptr, n_bytes); @@ -72,6 +79,7 @@ STATIC void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) { return NULL; } } + #endif // MICROPY_ENABLE_GC void *m_malloc(size_t num_bytes) { diff --git a/python/src/py/map.c b/python/src/py/map.c index 6abf4853f..5f3c6e547 100644 --- a/python/src/py/map.c +++ b/python/src/py/map.c @@ -150,9 +150,9 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t // Work out if we can compare just pointers bool compare_only_ptrs = map->all_keys_are_qstrs; if (compare_only_ptrs) { - if (MP_OBJ_IS_QSTR(index)) { + if (mp_obj_is_qstr(index)) { // Index is a qstr, so can just do ptr comparison. - } else if (MP_OBJ_IS_TYPE(index, &mp_type_str)) { + } else if (mp_obj_is_type(index, &mp_type_str)) { // Index is a non-interned string. // We can either intern the string, or force a full equality comparison. // We chose the latter, since interning costs time and potentially RAM, @@ -197,7 +197,7 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t } mp_map_elem_t *elem = map->table + map->used++; elem->key = index; - if (!MP_OBJ_IS_QSTR(index)) { + if (!mp_obj_is_qstr(index)) { map->all_keys_are_qstrs = 0; } return elem; @@ -218,7 +218,7 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t // get hash of index, with fast path for common case of qstr mp_uint_t hash; - if (MP_OBJ_IS_QSTR(index)) { + if (mp_obj_is_qstr(index)) { hash = qstr_hash(MP_OBJ_QSTR_VALUE(index)); } else { hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index)); @@ -238,7 +238,7 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t } avail_slot->key = index; avail_slot->value = MP_OBJ_NULL; - if (!MP_OBJ_IS_QSTR(index)) { + if (!mp_obj_is_qstr(index)) { map->all_keys_are_qstrs = 0; } return avail_slot; @@ -278,7 +278,7 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t map->used++; avail_slot->key = index; avail_slot->value = MP_OBJ_NULL; - if (!MP_OBJ_IS_QSTR(index)) { + if (!mp_obj_is_qstr(index)) { map->all_keys_are_qstrs = 0; } return avail_slot; @@ -395,7 +395,7 @@ mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t looku mp_obj_t mp_set_remove_first(mp_set_t *set) { for (size_t pos = 0; pos < set->alloc; pos++) { - if (MP_SET_SLOT_IS_FILLED(set, pos)) { + if (mp_set_slot_is_filled(set, pos)) { mp_obj_t elem = set->table[pos]; // delete element set->used--; @@ -423,13 +423,13 @@ void mp_set_clear(mp_set_t *set) { #if defined(DEBUG_PRINT) && DEBUG_PRINT void mp_map_dump(mp_map_t *map) { for (size_t i = 0; i < map->alloc; i++) { - if (map->table[i].key != NULL) { + if (map->table[i].key != MP_OBJ_NULL) { mp_obj_print(map->table[i].key, PRINT_REPR); } else { - printf("(nil)"); + DEBUG_printf("(nil)"); } - printf(": %p\n", map->table[i].value); + DEBUG_printf(": %p\n", map->table[i].value); } - printf("---\n"); + DEBUG_printf("---\n"); } #endif diff --git a/python/src/py/misc.h b/python/src/py/misc.h index 72560da1e..0aa4a5d2c 100644 --- a/python/src/py/misc.h +++ 
b/python/src/py/misc.h @@ -47,8 +47,11 @@ typedef unsigned int uint; #endif // Classical double-indirection stringification of preprocessor macro's value -#define _MP_STRINGIFY(x) #x -#define MP_STRINGIFY(x) _MP_STRINGIFY(x) +#define MP_STRINGIFY_HELPER(x) #x +#define MP_STRINGIFY(x) MP_STRINGIFY_HELPER(x) + +// Static assertion macro +#define MP_STATIC_ASSERT(cond) ((void)sizeof(char[1 - 2 * !(cond)])) /** memory allocation ******************************************/ diff --git a/python/src/py/modarray.c b/python/src/py/modarray.c index c0cdca928..de84fc858 100644 --- a/python/src/py/modarray.c +++ b/python/src/py/modarray.c @@ -40,4 +40,6 @@ const mp_obj_module_t mp_module_array = { .globals = (mp_obj_dict_t*)&mp_module_array_globals, }; +MP_REGISTER_MODULE(MP_QSTR_array, mp_module_array, MICROPY_PY_ARRAY); + #endif diff --git a/python/src/py/modbuiltins.c b/python/src/py/modbuiltins.c index 0d511338b..a65f3beec 100644 --- a/python/src/py/modbuiltins.c +++ b/python/src/py/modbuiltins.c @@ -178,7 +178,7 @@ STATIC mp_obj_t mp_builtin_dir(size_t n_args, const mp_obj_t *args) { // Make a list of names in the local namespace mp_obj_dict_t *dict = mp_locals_get(); for (size_t i = 0; i < dict->map.alloc; i++) { - if (MP_MAP_SLOT_IS_FILLED(&dict->map, i)) { + if (mp_map_slot_is_filled(&dict->map, i)) { mp_obj_list_append(dir, dict->map.table[i].key); } } @@ -217,7 +217,12 @@ STATIC mp_obj_t mp_builtin_hash(mp_obj_t o_in) { MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hash_obj, mp_builtin_hash); STATIC mp_obj_t mp_builtin_hex(mp_obj_t o_in) { + #if MICROPY_PY_BUILTINS_STR_OP_MODULO return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_x), o_in); + #else + mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_x_brace_close_), o_in }; + return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL); + #endif } MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hex_obj, mp_builtin_hex); @@ -311,6 +316,22 @@ MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_min_obj, 1, mp_builtin_min); #endif +#if MICROPY_PY_BUILTINS_NEXT2 +STATIC mp_obj_t mp_builtin_next(size_t n_args, const mp_obj_t *args) { + if (n_args == 1) { + mp_obj_t ret = mp_iternext_allow_raise(args[0]); + if (ret == MP_OBJ_STOP_ITERATION) { + nlr_raise(mp_obj_new_exception(&mp_type_StopIteration)); + } else { + return ret; + } + } else { + mp_obj_t ret = mp_iternext(args[0]); + return ret == MP_OBJ_STOP_ITERATION ? 
args[1] : ret; + } +} +MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj, 1, 2, mp_builtin_next); +#else STATIC mp_obj_t mp_builtin_next(mp_obj_t o) { mp_obj_t ret = mp_iternext_allow_raise(o); if (ret == MP_OBJ_STOP_ITERATION) { @@ -320,9 +341,15 @@ STATIC mp_obj_t mp_builtin_next(mp_obj_t o) { } } MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_next_obj, mp_builtin_next); +#endif STATIC mp_obj_t mp_builtin_oct(mp_obj_t o_in) { + #if MICROPY_PY_BUILTINS_STR_OP_MODULO return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_o), o_in); + #else + mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_o_brace_close_), o_in }; + return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL); + #endif } MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_oct_obj, mp_builtin_oct); @@ -330,7 +357,7 @@ STATIC mp_obj_t mp_builtin_ord(mp_obj_t o_in) { size_t len; const byte *str = (const byte*)mp_obj_str_get_data(o_in, &len); #if MICROPY_PY_BUILTINS_STR_UNICODE - if (MP_OBJ_IS_STR(o_in)) { + if (mp_obj_is_str(o_in)) { len = utf8_charlen(str, len); if (len == 1) { return mp_obj_new_int(utf8_get_char(str)); @@ -386,7 +413,7 @@ STATIC mp_obj_t mp_builtin_print(size_t n_args, const mp_obj_t *pos_args, mp_map mp_arg_parse_all(0, NULL, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, u.args); #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES - // TODO file may not be a concrete object (eg it could be a small-int) + mp_get_stream_raise(u.args[ARG_file].u_obj, MP_STREAM_OP_WRITE); mp_print_t print = {MP_OBJ_TO_PTR(u.args[ARG_file].u_obj), mp_stream_write_adaptor}; #endif @@ -444,8 +471,37 @@ MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_repr_obj, mp_builtin_repr); STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) { mp_obj_t o_in = args[0]; - if (MP_OBJ_IS_INT(o_in)) { - return o_in; + if (mp_obj_is_int(o_in)) { + if (n_args <= 1) { + return o_in; + } + + #if !MICROPY_PY_BUILTINS_ROUND_INT + mp_raise_NotImplementedError(NULL); + #else + mp_int_t num_dig = mp_obj_get_int(args[1]); + if (num_dig >= 0) { + return o_in; + } + + mp_obj_t mult = mp_binary_op(MP_BINARY_OP_POWER, MP_OBJ_NEW_SMALL_INT(10), MP_OBJ_NEW_SMALL_INT(-num_dig)); + mp_obj_t half_mult = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, mult, MP_OBJ_NEW_SMALL_INT(2)); + mp_obj_t modulo = mp_binary_op(MP_BINARY_OP_MODULO, o_in, mult); + mp_obj_t rounded = mp_binary_op(MP_BINARY_OP_SUBTRACT, o_in, modulo); + if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, half_mult, modulo))) { + return rounded; + } else if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, modulo, half_mult))) { + return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult); + } else { + // round to even number + mp_obj_t floor = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, o_in, mult); + if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_AND, floor, MP_OBJ_NEW_SMALL_INT(1)))) { + return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult); + } else { + return rounded; + } + } + #endif } #if MICROPY_PY_BUILTINS_FLOAT mp_float_t val = mp_obj_get_float(o_in); diff --git a/python/src/py/modio.c b/python/src/py/modio.c index 3a5c69c4c..0f4a4326c 100644 --- a/python/src/py/modio.c +++ b/python/src/py/modio.c @@ -30,6 +30,8 @@ #include "py/runtime.h" #include "py/builtin.h" #include "py/stream.h" +#include "py/binary.h" +#include "py/objarray.h" #include "py/objstringio.h" #include "py/frozenmod.h" @@ -38,6 +40,70 @@ extern const mp_obj_type_t mp_type_fileio; extern const mp_obj_type_t mp_type_textio; +#if MICROPY_PY_IO_IOBASE + +STATIC const mp_obj_type_t mp_type_iobase; + +STATIC const 
mp_obj_base_t iobase_singleton = {&mp_type_iobase}; + +STATIC mp_obj_t iobase_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) { + (void)type; + (void)n_args; + (void)n_kw; + (void)args; + return MP_OBJ_FROM_PTR(&iobase_singleton); +} + +STATIC mp_uint_t iobase_read_write(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode, qstr qst) { + mp_obj_t dest[3]; + mp_load_method(obj, qst, dest); + mp_obj_array_t ar = {{&mp_type_bytearray}, BYTEARRAY_TYPECODE, 0, size, buf}; + dest[2] = MP_OBJ_FROM_PTR(&ar); + mp_obj_t ret = mp_call_method_n_kw(1, 0, dest); + if (ret == mp_const_none) { + *errcode = MP_EAGAIN; + return MP_STREAM_ERROR; + } else { + return mp_obj_get_int(ret); + } +} +STATIC mp_uint_t iobase_read(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode) { + return iobase_read_write(obj, buf, size, errcode, MP_QSTR_readinto); +} + +STATIC mp_uint_t iobase_write(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode) { + return iobase_read_write(obj, (void*)buf, size, errcode, MP_QSTR_write); +} + +STATIC mp_uint_t iobase_ioctl(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode) { + mp_obj_t dest[4]; + mp_load_method(obj, MP_QSTR_ioctl, dest); + dest[2] = mp_obj_new_int_from_uint(request); + dest[3] = mp_obj_new_int_from_uint(arg); + mp_int_t ret = mp_obj_get_int(mp_call_method_n_kw(2, 0, dest)); + if (ret >= 0) { + return ret; + } else { + *errcode = -ret; + return MP_STREAM_ERROR; + } +} + +STATIC const mp_stream_p_t iobase_p = { + .read = iobase_read, + .write = iobase_write, + .ioctl = iobase_ioctl, +}; + +STATIC const mp_obj_type_t mp_type_iobase = { + { &mp_type_type }, + .name = MP_QSTR_IOBase, + .make_new = iobase_make_new, + .protocol = &iobase_p, +}; + +#endif // MICROPY_PY_IO_IOBASE + #if MICROPY_PY_IO_BUFFEREDWRITER typedef struct _mp_obj_bufwriter_t { mp_obj_base_t base; @@ -81,6 +147,7 @@ STATIC mp_uint_t bufwriter_write(mp_obj_t self_in, const void *buf, mp_uint_t si buf = (byte*)buf + rem; size -= rem; mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->alloc, errcode); + (void)out_sz; if (*errcode != 0) { return MP_STREAM_ERROR; } @@ -99,6 +166,7 @@ STATIC mp_obj_t bufwriter_flush(mp_obj_t self_in) { if (self->len != 0) { int err; mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->len, &err); + (void)out_sz; // TODO: try to recover from a case of non-blocking stream, e.g. move // remaining chunk to the beginning of buffer. assert(out_sz == self->len); @@ -187,6 +255,9 @@ STATIC const mp_rom_map_elem_t mp_module_io_globals_table[] = { // Note: mp_builtin_open_obj should be defined by port, it's not // part of the core. 
{ MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_builtin_open_obj) }, + #if MICROPY_PY_IO_IOBASE + { MP_ROM_QSTR(MP_QSTR_IOBase), MP_ROM_PTR(&mp_type_iobase) }, + #endif #if MICROPY_PY_IO_RESOURCE_STREAM { MP_ROM_QSTR(MP_QSTR_resource_stream), MP_ROM_PTR(&resource_stream_obj) }, #endif diff --git a/python/src/py/modmath.c b/python/src/py/modmath.c index 7eda7594d..d106f240c 100644 --- a/python/src/py/modmath.c +++ b/python/src/py/modmath.c @@ -169,7 +169,7 @@ MATH_FUN_1(gamma, tgamma) // lgamma(x): return the natural logarithm of the gamma function of x MATH_FUN_1(lgamma, lgamma) #endif -//TODO: factorial, fsum +//TODO: fsum // Function that takes a variable number of arguments @@ -187,7 +187,7 @@ STATIC mp_obj_t mp_math_log(size_t n_args, const mp_obj_t *args) { if (base <= (mp_float_t)0.0) { math_error(); } else if (base == (mp_float_t)1.0) { - mp_raise_msg(&mp_type_ZeroDivisionError, "division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero"); } return mp_obj_new_float(l / MICROPY_FLOAT_C_FUN(log)(base)); } @@ -232,6 +232,70 @@ STATIC mp_obj_t mp_math_degrees(mp_obj_t x_obj) { } STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_degrees_obj, mp_math_degrees); +#if MICROPY_PY_MATH_FACTORIAL + +#if MICROPY_OPT_MATH_FACTORIAL + +// factorial(x): slightly efficient recursive implementation +STATIC mp_obj_t mp_math_factorial_inner(mp_uint_t start, mp_uint_t end) { + if (start == end) { + return mp_obj_new_int(start); + } else if (end - start == 1) { + return mp_binary_op(MP_BINARY_OP_MULTIPLY, MP_OBJ_NEW_SMALL_INT(start), MP_OBJ_NEW_SMALL_INT(end)); + } else if (end - start == 2) { + mp_obj_t left = MP_OBJ_NEW_SMALL_INT(start); + mp_obj_t middle = MP_OBJ_NEW_SMALL_INT(start + 1); + mp_obj_t right = MP_OBJ_NEW_SMALL_INT(end); + mp_obj_t tmp = mp_binary_op(MP_BINARY_OP_MULTIPLY, left, middle); + return mp_binary_op(MP_BINARY_OP_MULTIPLY, tmp, right); + } else { + mp_uint_t middle = start + ((end - start) >> 1); + mp_obj_t left = mp_math_factorial_inner(start, middle); + mp_obj_t right = mp_math_factorial_inner(middle + 1, end); + return mp_binary_op(MP_BINARY_OP_MULTIPLY, left, right); + } +} +STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) { + mp_int_t max = mp_obj_get_int(x_obj); + if (max < 0) { + mp_raise_msg(&mp_type_ValueError, "negative factorial"); + } else if (max == 0) { + return MP_OBJ_NEW_SMALL_INT(1); + } + return mp_math_factorial_inner(1, max); +} + +#else + +// factorial(x): squared difference implementation +// based on http://www.luschny.de/math/factorial/index.html +STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) { + mp_int_t max = mp_obj_get_int(x_obj); + if (max < 0) { + mp_raise_msg(&mp_type_ValueError, "negative factorial"); + } else if (max <= 1) { + return MP_OBJ_NEW_SMALL_INT(1); + } + mp_int_t h = max >> 1; + mp_int_t q = h * h; + mp_int_t r = q << 1; + if (max & 1) { + r *= max; + } + mp_obj_t prod = MP_OBJ_NEW_SMALL_INT(r); + for (mp_int_t num = 1; num < max - 2; num += 2) { + q -= num; + prod = mp_binary_op(MP_BINARY_OP_MULTIPLY, prod, MP_OBJ_NEW_SMALL_INT(q)); + } + return prod; +} + +#endif + +STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_factorial_obj, mp_math_factorial); + +#endif + STATIC const mp_rom_map_elem_t mp_module_math_globals_table[] = { { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_math) }, { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e }, @@ -274,6 +338,9 @@ STATIC const mp_rom_map_elem_t mp_module_math_globals_table[] = { { MP_ROM_QSTR(MP_QSTR_trunc), MP_ROM_PTR(&mp_math_trunc_obj) }, { MP_ROM_QSTR(MP_QSTR_radians), 
MP_ROM_PTR(&mp_math_radians_obj) }, { MP_ROM_QSTR(MP_QSTR_degrees), MP_ROM_PTR(&mp_math_degrees_obj) }, + #if MICROPY_PY_MATH_FACTORIAL + { MP_ROM_QSTR(MP_QSTR_factorial), MP_ROM_PTR(&mp_math_factorial_obj) }, + #endif #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS { MP_ROM_QSTR(MP_QSTR_erf), MP_ROM_PTR(&mp_math_erf_obj) }, { MP_ROM_QSTR(MP_QSTR_erfc), MP_ROM_PTR(&mp_math_erfc_obj) }, diff --git a/python/src/py/modsys.c b/python/src/py/modsys.c index 609f8b454..343451732 100644 --- a/python/src/py/modsys.c +++ b/python/src/py/modsys.c @@ -37,8 +37,6 @@ #if MICROPY_PY_SYS -#include "genhdr/mpversion.h" - // defined per port; type of these is irrelevant, just need pointer extern struct _mp_dummy_t mp_sys_stdin_obj; extern struct _mp_dummy_t mp_sys_stdout_obj; @@ -106,7 +104,8 @@ STATIC mp_obj_t mp_sys_print_exception(size_t n_args, const mp_obj_t *args) { #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES void *stream_obj = &mp_sys_stdout_obj; if (n_args > 1) { - stream_obj = MP_OBJ_TO_PTR(args[1]); // XXX may fail + mp_get_stream_raise(args[1], MP_STREAM_OP_WRITE); + stream_obj = MP_OBJ_TO_PTR(args[1]); } mp_print_t print = {stream_obj, mp_stream_write_adaptor}; diff --git a/python/src/py/modthread.c b/python/src/py/modthread.c index 61ada5035..91237a72b 100644 --- a/python/src/py/modthread.c +++ b/python/src/py/modthread.c @@ -242,7 +242,7 @@ STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) th_args->n_kw = map->used; // copy across the keyword arguments for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) { - if (MP_MAP_SLOT_IS_FILLED(map, i)) { + if (mp_map_slot_is_filled(map, i)) { th_args->args[n++] = map->table[i].key; th_args->args[n++] = map->table[i].value; } diff --git a/python/src/py/mpconfig.h b/python/src/py/mpconfig.h index 532b54ab0..219f8de44 100644 --- a/python/src/py/mpconfig.h +++ b/python/src/py/mpconfig.h @@ -26,6 +26,23 @@ #ifndef MICROPY_INCLUDED_PY_MPCONFIG_H #define MICROPY_INCLUDED_PY_MPCONFIG_H +// Current version of MicroPython +#define MICROPY_VERSION_MAJOR 1 +#define MICROPY_VERSION_MINOR 11 +#define MICROPY_VERSION_MICRO 0 + +// Combined version as a 32-bit number for convenience +#define MICROPY_VERSION ( \ + MICROPY_VERSION_MAJOR << 16 \ + | MICROPY_VERSION_MINOR << 8 \ + | MICROPY_VERSION_MICRO) + +// String version +#define MICROPY_VERSION_STRING \ + MP_STRINGIFY(MICROPY_VERSION_MAJOR) "." \ + MP_STRINGIFY(MICROPY_VERSION_MINOR) "." \ + MP_STRINGIFY(MICROPY_VERSION_MICRO) + // This file contains default configuration settings for MicroPython. // You can override any of the options below using mpconfigport.h file // located in a directory of your port. @@ -106,6 +123,15 @@ #define MICROPY_ALLOC_GC_STACK_SIZE (64) #endif +// The C-type to use for entries in the GC stack. By default it allows the +// heap to be as large as the address space, but the bit-width of this type can +// be reduced to save memory when the heap is small enough. The type must be +// big enough to index all blocks in the heap, which is set by +// heap-size-in-bytes / MICROPY_BYTES_PER_GC_BLOCK. +#ifndef MICROPY_GC_STACK_ENTRY_TYPE +#define MICROPY_GC_STACK_ENTRY_TYPE size_t +#endif + // Be conservative and always clear to zero newly (re)allocated memory in the GC. // This helps eliminate stray pointers that hold on to memory that's no longer // used. It decreases performance due to unnecessary memory clearing. 
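As a quick illustration of the combined version number defined above (a hypothetical usage sketch, not part of this diff; the threshold and error text are invented), a port could gate its build on it:

#include "py/mpconfig.h"

// Require at least MicroPython 1.11, using the same
// (major << 16 | minor << 8 | micro) encoding as MICROPY_VERSION.
#if MICROPY_VERSION < ((1 << 16) | (11 << 8) | 0)
#error "MicroPython 1.11 or newer is required"
#endif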
@@ -330,6 +356,11 @@ #define MICROPY_COMP_CONST_FOLDING (1) #endif +// Whether to enable optimisations for constant literals, eg OrderedDict +#ifndef MICROPY_COMP_CONST_LITERAL +#define MICROPY_COMP_CONST_LITERAL (1) +#endif + // Whether to enable lookup of constants in modules; eg module.CONST #ifndef MICROPY_COMP_MODULE_CONST #define MICROPY_COMP_MODULE_CONST (0) @@ -366,6 +397,11 @@ #define MICROPY_MEM_STATS (0) #endif +// The mp_print_t printer used for debugging output +#ifndef MICROPY_DEBUG_PRINTER +#define MICROPY_DEBUG_PRINTER (&mp_plat_print) +#endif + // Whether to build functions that print debugging info: // mp_bytecode_print // mp_parse_node_print @@ -378,6 +414,16 @@ #define MICROPY_DEBUG_VERBOSE (0) #endif +// Whether to enable debugging versions of MP_OBJ_NULL/STOP_ITERATION/SENTINEL +#ifndef MICROPY_DEBUG_MP_OBJ_SENTINELS +#define MICROPY_DEBUG_MP_OBJ_SENTINELS (0) +#endif + +// Whether to enable a simple VM stack overflow check +#ifndef MICROPY_DEBUG_VM_STACK_OVERFLOW +#define MICROPY_DEBUG_VM_STACK_OVERFLOW (0) +#endif + /*****************************************************************************/ /* Optimisations */ @@ -402,6 +448,12 @@ #define MICROPY_OPT_MPZ_BITWISE (0) #endif + +// Whether math.factorial is large, fast and recursive (1) or small and slow (0). +#ifndef MICROPY_OPT_MATH_FACTORIAL +#define MICROPY_OPT_MATH_FACTORIAL (0) +#endif + /*****************************************************************************/ /* Python internal features */ @@ -422,6 +474,11 @@ #define MICROPY_READER_VFS (0) #endif +// Whether any readers have been defined +#ifndef MICROPY_HAS_FILE_READER +#define MICROPY_HAS_FILE_READER (MICROPY_READER_POSIX || MICROPY_READER_VFS) +#endif + // Hook for the VM at the start of the opcode loop (can contain variable // definitions usable by the other hook functions) #ifndef MICROPY_VM_HOOK_INIT @@ -551,6 +608,11 @@ typedef long long mp_longint_impl_t; #define MICROPY_WARNINGS (0) #endif +// Whether to support warning categories +#ifndef MICROPY_WARNINGS_CATEGORY +#define MICROPY_WARNINGS_CATEGORY (0) +#endif + // This macro is used when printing runtime warnings and errors #ifndef MICROPY_ERROR_PRINTER #define MICROPY_ERROR_PRINTER (&mp_plat_print) @@ -622,6 +684,11 @@ typedef double mp_float_t; #define MICROPY_MODULE_BUILTIN_INIT (0) #endif +// Whether to support module-level __getattr__ (see PEP 562) +#ifndef MICROPY_MODULE_GETATTR +#define MICROPY_MODULE_GETATTR (0) +#endif + // Whether module weak links are supported #ifndef MICROPY_MODULE_WEAK_LINKS #define MICROPY_MODULE_WEAK_LINKS (0) @@ -681,6 +748,16 @@ typedef double mp_float_t; #define MICROPY_VFS (0) #endif +// Support for VFS POSIX component, to mount a POSIX filesystem within VFS +#ifndef MICROPY_VFS_POSIX +#define MICROPY_VFS_POSIX (0) +#endif + +// Support for VFS FAT component, to mount a FAT filesystem within VFS +#ifndef MICROPY_VFS_FAT +#define MICROPY_VFS_FAT (0) +#endif + /*****************************************************************************/ /* Fine control over Python builtins, classes, modules, etc */ @@ -696,14 +773,16 @@ typedef double mp_float_t; #define MICROPY_PY_FUNCTION_ATTRS (0) #endif -// Whether to support descriptors (__get__ and __set__) -// This costs some code size and makes all load attrs and store attrs slow +// Whether to support the descriptors __get__, __set__, __delete__ +// This costs some code size and makes load/store/delete of instance +// attributes slower for the classes that use this feature #ifndef MICROPY_PY_DESCRIPTORS #define 
MICROPY_PY_DESCRIPTORS (0) #endif // Whether to support class __delattr__ and __setattr__ methods -// This costs some code size and makes all del attrs and store attrs slow +// This costs some code size and makes store/delete of instance +// attributes slower for the classes that use this feature #ifndef MICROPY_PY_DELATTR_SETATTR #define MICROPY_PY_DELATTR_SETATTR (0) #endif @@ -742,6 +821,16 @@ typedef double mp_float_t; #define MICROPY_PY_BUILTINS_STR_CENTER (0) #endif +// Whether str.count() method provided +#ifndef MICROPY_PY_BUILTINS_STR_COUNT +#define MICROPY_PY_BUILTINS_STR_COUNT (1) +#endif + +// Whether str % (...) formatting operator provided +#ifndef MICROPY_PY_BUILTINS_STR_OP_MODULO +#define MICROPY_PY_BUILTINS_STR_OP_MODULO (1) +#endif + // Whether str.partition()/str.rpartition() method provided #ifndef MICROPY_PY_BUILTINS_STR_PARTITION #define MICROPY_PY_BUILTINS_STR_PARTITION (0) @@ -757,11 +846,21 @@ typedef double mp_float_t; #define MICROPY_PY_BUILTINS_BYTEARRAY (1) #endif +// Whether to support dict.fromkeys() class method +#ifndef MICROPY_PY_BUILTINS_DICT_FROMKEYS +#define MICROPY_PY_BUILTINS_DICT_FROMKEYS (1) +#endif + // Whether to support memoryview object #ifndef MICROPY_PY_BUILTINS_MEMORYVIEW #define MICROPY_PY_BUILTINS_MEMORYVIEW (0) #endif +// Whether to support memoryview.itemsize attribute +#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE +#define MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE (0) +#endif + // Whether to support set object #ifndef MICROPY_PY_BUILTINS_SET #define MICROPY_PY_BUILTINS_SET (1) @@ -802,6 +901,16 @@ typedef double mp_float_t; #define MICROPY_PY_BUILTINS_RANGE_BINOP (0) #endif +// Support for calling next() with second argument +#ifndef MICROPY_PY_BUILTINS_NEXT2 +#define MICROPY_PY_BUILTINS_NEXT2 (0) +#endif + +// Whether to support rounding of integers (incl bignum); eg round(123,-1)=120 +#ifndef MICROPY_PY_BUILTINS_ROUND_INT +#define MICROPY_PY_BUILTINS_ROUND_INT (0) +#endif + // Whether to support timeout exceptions (like socket.timeout) #ifndef MICROPY_PY_BUILTINS_TIMEOUTERROR #define MICROPY_PY_BUILTINS_TIMEOUTERROR (0) #endif @@ -961,6 +1070,11 @@ typedef double mp_float_t; #define MICROPY_PY_MATH_SPECIAL_FUNCTIONS (0) #endif +// Whether to provide math.factorial function +#ifndef MICROPY_PY_MATH_FACTORIAL +#define MICROPY_PY_MATH_FACTORIAL (0) +#endif + // Whether to provide "cmath" module #ifndef MICROPY_PY_CMATH #define MICROPY_PY_CMATH (0) #endif @@ -981,6 +1095,11 @@ typedef double mp_float_t; #define MICROPY_PY_IO (1) #endif +// Whether to provide "io.IOBase" class to support user streams +#ifndef MICROPY_PY_IO_IOBASE +#define MICROPY_PY_IO_IOBASE (0) +#endif + // Whether to provide "uio.resource_stream()" function with // the semantics of CPython's pkg_resources.resource_stream() // (allows to access binary resources in frozen source packages). @@ -1108,6 +1227,12 @@ typedef double mp_float_t; #define MICROPY_PY_UCTYPES (0) #endif +// Whether to provide SHORT, INT, LONG, etc. types in addition to +// exact-bitness types like INT16, INT32, etc. 
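+// (eg SHORT follows the target's native C short width, which varies by
+// platform, while INT16 is always exactly 16 bits)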
+#ifndef MICROPY_PY_UCTYPES_NATIVE_C_TYPES +#define MICROPY_PY_UCTYPES_NATIVE_C_TYPES (1) +#endif + #ifndef MICROPY_PY_UZLIB #define MICROPY_PY_UZLIB (0) #endif @@ -1120,6 +1245,18 @@ typedef double mp_float_t; #define MICROPY_PY_URE (0) #endif +#ifndef MICROPY_PY_URE_MATCH_GROUPS +#define MICROPY_PY_URE_MATCH_GROUPS (0) +#endif + +#ifndef MICROPY_PY_URE_MATCH_SPAN_START_END +#define MICROPY_PY_URE_MATCH_SPAN_START_END (0) +#endif + +#ifndef MICROPY_PY_URE_SUB +#define MICROPY_PY_URE_SUB (0) +#endif + #ifndef MICROPY_PY_UHEAPQ #define MICROPY_PY_UHEAPQ (0) #endif @@ -1133,6 +1270,31 @@ typedef double mp_float_t; #define MICROPY_PY_UHASHLIB (0) #endif +#ifndef MICROPY_PY_UHASHLIB_MD5 +#define MICROPY_PY_UHASHLIB_MD5 (0) +#endif + +#ifndef MICROPY_PY_UHASHLIB_SHA1 +#define MICROPY_PY_UHASHLIB_SHA1 (0) +#endif + +#ifndef MICROPY_PY_UHASHLIB_SHA256 +#define MICROPY_PY_UHASHLIB_SHA256 (1) +#endif + +#ifndef MICROPY_PY_UCRYPTOLIB +#define MICROPY_PY_UCRYPTOLIB (0) +#endif + +// Depends on MICROPY_PY_UCRYPTOLIB +#ifndef MICROPY_PY_UCRYPTOLIB_CTR +#define MICROPY_PY_UCRYPTOLIB_CTR (0) +#endif + +#ifndef MICROPY_PY_UCRYPTOLIB_CONSTS +#define MICROPY_PY_UCRYPTOLIB_CONSTS (0) +#endif + #ifndef MICROPY_PY_UBINASCII #define MICROPY_PY_UBINASCII (0) #endif @@ -1174,8 +1336,8 @@ typedef double mp_float_t; #define MICROPY_PY_USSL_FINALISER (0) #endif -#ifndef MICROPY_PY_WEBSOCKET -#define MICROPY_PY_WEBSOCKET (0) +#ifndef MICROPY_PY_UWEBSOCKET +#define MICROPY_PY_UWEBSOCKET (0) #endif #ifndef MICROPY_PY_FRAMEBUF @@ -1189,17 +1351,17 @@ typedef double mp_float_t; /*****************************************************************************/ /* Hooks for a port to add builtins */ -// Additional builtin function definitions - see builtintables.c:builtin_object_table for format. +// Additional builtin function definitions - see modbuiltins.c:mp_module_builtins_globals_table for format. #ifndef MICROPY_PORT_BUILTINS #define MICROPY_PORT_BUILTINS #endif -// Additional builtin module definitions - see builtintables.c:builtin_module_table for format. +// Additional builtin module definitions - see objmodule.c:mp_builtin_module_table for format. #ifndef MICROPY_PORT_BUILTIN_MODULES #define MICROPY_PORT_BUILTIN_MODULES #endif -// Any module weak links - see builtintables.c:mp_builtin_module_weak_links_table. +// Any module weak links - see objmodule.c:mp_builtin_module_weak_links_table. #ifndef MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS #define MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS #endif @@ -1256,29 +1418,26 @@ typedef double mp_float_t; #elif defined(MP_ENDIANNESS_BIG) #define MP_ENDIANNESS_LITTLE (!MP_ENDIANNESS_BIG) #else - // Endiannes not defined by port so try to autodetect it. + // Endianness not defined by port so try to autodetect it. 
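// (Detection order: the compiler-provided __BYTE_ORDER__ is consulted first,
// then <endian.h>'s __BYTE_ORDER, and if neither gives an answer the #error
// below fires at compile time.)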
#if defined(__BYTE_ORDER__) #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define MP_ENDIANNESS_LITTLE (1) - #else + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define MP_ENDIANNESS_LITTLE (0) #endif - #elif defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined (_LITTLE_ENDIAN) - #define MP_ENDIANNESS_LITTLE (1) - #elif defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined (_BIG_ENDIAN) - #define MP_ENDIANNESS_LITTLE (0) #else #include <endian.h> #if defined(__BYTE_ORDER) #if __BYTE_ORDER == __LITTLE_ENDIAN #define MP_ENDIANNESS_LITTLE (1) - #else + #elif __BYTE_ORDER == __BIG_ENDIAN #define MP_ENDIANNESS_LITTLE (0) #endif - #else - #error endianness not defined and cannot detect it #endif #endif + #ifndef MP_ENDIANNESS_LITTLE + #error endianness not defined and cannot detect it + #endif #define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE) #endif @@ -1374,4 +1533,15 @@ typedef double mp_float_t; #endif #endif +// Warning categories are by default implemented as strings, though +// hook is left for a port to define them as something else. +#if MICROPY_WARNINGS_CATEGORY +# ifndef MP_WARN_CAT +# define MP_WARN_CAT(x) #x +# endif +#else +# undef MP_WARN_CAT +# define MP_WARN_CAT(x) (NULL) +#endif + #endif // MICROPY_INCLUDED_PY_MPCONFIG_H diff --git a/python/src/py/mpprint.c b/python/src/py/mpprint.c index c2e65301c..cf09f7bbe 100644 --- a/python/src/py/mpprint.c +++ b/python/src/py/mpprint.c @@ -207,7 +207,7 @@ int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char // If needed this function could be generalised to handle other values. assert(base == 2 || base == 8 || base == 10 || base == 16); - if (!MP_OBJ_IS_INT(x)) { + if (!mp_obj_is_int(x)) { // This will convert booleans to int, or raise an error for // non-integer types. 
x = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(x)); @@ -503,22 +503,28 @@ int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) { chrs += mp_print_strn(print, str, prec, flags, fill, width); break; } + case 'd': { + mp_int_t val; + if (long_arg) { + val = va_arg(args, long int); + } else { + val = va_arg(args, int); + } + chrs += mp_print_int(print, val, 1, 10, 'a', flags, fill, width); + break; + } case 'u': - chrs += mp_print_int(print, va_arg(args, unsigned int), 0, 10, 'a', flags, fill, width); - break; - case 'd': - chrs += mp_print_int(print, va_arg(args, int), 1, 10, 'a', flags, fill, width); - break; case 'x': case 'X': { - char fmt_c = *fmt - 'X' + 'A'; + int base = 16 - ((*fmt + 1) & 6); // maps char u/x/X to base 10/16/16 + char fmt_c = (*fmt & 0xf0) - 'P' + 'A'; // maps char u/x/X to char a/a/A mp_uint_t val; if (long_arg) { val = va_arg(args, unsigned long int); } else { val = va_arg(args, unsigned int); } - chrs += mp_print_int(print, val, 0, 16, fmt_c, flags, fill, width); + chrs += mp_print_int(print, val, 0, base, fmt_c, flags, fill, width); break; } case 'p': diff --git a/python/src/py/mpstate.h b/python/src/py/mpstate.h index 16c4bf6c5..a9c2b32d6 100644 --- a/python/src/py/mpstate.h +++ b/python/src/py/mpstate.h @@ -46,6 +46,7 @@ typedef struct mp_dynamic_compiler_t { uint8_t small_int_bits; // must be <= host small_int_bits bool opt_cache_map_lookup_in_bytecode; bool py_builtins_str_unicode; + uint8_t native_arch; } mp_dynamic_compiler_t; extern mp_dynamic_compiler_t mp_dynamic_compiler; #endif @@ -77,7 +78,7 @@ typedef struct _mp_state_mem_t { byte *gc_pool_end; int gc_stack_overflow; - size_t gc_stack[MICROPY_ALLOC_GC_STACK_SIZE]; + MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE]; uint16_t gc_lock_depth; // This variable controls auto garbage collection. If set to 0 then the @@ -105,10 +106,11 @@ typedef struct _mp_state_mem_t { // This structure hold runtime and VM information. It includes a section // which contains root pointers that must be scanned by the GC. typedef struct _mp_state_vm_t { - //////////////////////////////////////////////////////////// - // START ROOT POINTER SECTION - // everything that needs GC scanning must go here - // this must start at the start of this structure + // + // CONTINUE ROOT POINTER SECTION + // This must start at the start of this structure and follows + // the state in the mp_state_thread_t structure, continuing + // the root pointer section from there. // qstr_pool_t *last_pool; @@ -139,8 +141,6 @@ typedef struct _mp_state_vm_t { volatile mp_obj_t mp_pending_exception; #if MICROPY_ENABLE_SCHEDULER - volatile int16_t sched_state; - uint16_t sched_sp; mp_sched_item_t sched_stack[MICROPY_SCHEDULER_DEPTH]; #endif @@ -172,7 +172,6 @@ typedef struct _mp_state_vm_t { #if MICROPY_PY_OS_DUPTERM mp_obj_t dupterm_objs[MICROPY_PY_OS_DUPTERM]; - mp_obj_t dupterm_arr_obj; #endif #if MICROPY_PY_LWIP_SLIP @@ -208,6 +207,12 @@ typedef struct _mp_state_vm_t { mp_int_t mp_emergency_exception_buf_size; #endif + #if MICROPY_ENABLE_SCHEDULER + volatile int16_t sched_state; + uint8_t sched_len; + uint8_t sched_idx; + #endif + #if MICROPY_PY_THREAD_GIL // This is a global mutex used to make the VM/runtime thread-safe. mp_thread_mutex_t gil_mutex; @@ -217,11 +222,6 @@ typedef struct _mp_state_vm_t { // This structure holds state that is specific to a given thread. // Everything in this structure is scanned for root pointers. 
typedef struct _mp_state_thread_t { - mp_obj_dict_t *dict_locals; - mp_obj_dict_t *dict_globals; - - nlr_buf_t *nlr_top; // ROOT POINTER - // Stack top at the start of program char *stack_top; @@ -234,12 +234,21 @@ uint8_t *pystack_end; uint8_t *pystack_cur; #endif + + //////////////////////////////////////////////////////////// + // START ROOT POINTER SECTION + // Everything that needs GC scanning must start here, and + // is followed by state in the mp_state_vm_t structure. + // + + mp_obj_dict_t *dict_locals; + mp_obj_dict_t *dict_globals; + + nlr_buf_t *nlr_top; } mp_state_thread_t; // This structure combines the above 3 structures. // The order of the entries are important for root pointer scanning in the GC to work. -// Note: if this structure changes then revisit all nlr asm code since they -// have the offset of nlr_top hard-coded. typedef struct _mp_state_ctx_t { mp_state_thread_t thread; mp_state_vm_t vm; diff --git a/python/src/py/mpz.c b/python/src/py/mpz.c index fa5086862..8687092d0 100644 --- a/python/src/py/mpz.c +++ b/python/src/py/mpz.c @@ -1532,7 +1532,7 @@ mpz_t *mpz_mod(const mpz_t *lhs, const mpz_t *rhs) { // must return actual int value if it fits in mp_int_t mp_int_t mpz_hash(const mpz_t *z) { - mp_int_t val = 0; + mp_uint_t val = 0; mpz_dig_t *d = z->dig + z->len; while (d-- > z->dig) { diff --git a/python/src/py/nativeglue.c b/python/src/py/nativeglue.c index e63c2fcda..c810f31e6 100644 --- a/python/src/py/nativeglue.c +++ b/python/src/py/nativeglue.c @@ -41,12 +41,26 @@ #if MICROPY_EMIT_NATIVE +int mp_native_type_from_qstr(qstr qst) { + switch (qst) { + case MP_QSTR_object: return MP_NATIVE_TYPE_OBJ; + case MP_QSTR_bool: return MP_NATIVE_TYPE_BOOL; + case MP_QSTR_int: return MP_NATIVE_TYPE_INT; + case MP_QSTR_uint: return MP_NATIVE_TYPE_UINT; + case MP_QSTR_ptr: return MP_NATIVE_TYPE_PTR; + case MP_QSTR_ptr8: return MP_NATIVE_TYPE_PTR8; + case MP_QSTR_ptr16: return MP_NATIVE_TYPE_PTR16; + case MP_QSTR_ptr32: return MP_NATIVE_TYPE_PTR32; + default: return -1; + } +} + // convert a MicroPython object to a valid native value based on type -mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) { - DEBUG_printf("mp_convert_obj_to_native(%p, " UINT_FMT ")\n", obj, type); +mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type) { + DEBUG_printf("mp_native_from_obj(%p, " UINT_FMT ")\n", obj, type); switch (type & 0xf) { case MP_NATIVE_TYPE_OBJ: return (mp_uint_t)obj; - case MP_NATIVE_TYPE_BOOL: + case MP_NATIVE_TYPE_BOOL: return mp_obj_is_true(obj); case MP_NATIVE_TYPE_INT: case MP_NATIVE_TYPE_UINT: return mp_obj_get_int_truncated(obj); default: { // cast obj to a pointer @@ -66,8 +80,8 @@ mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type) { #if MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM // convert a native value to a MicroPython object based on type -mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) { - DEBUG_printf("mp_convert_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type); +mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type) { + DEBUG_printf("mp_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type); switch (type & 0xf) { case MP_NATIVE_TYPE_OBJ: return (mp_obj_t)val; case MP_NATIVE_TYPE_BOOL: return mp_obj_new_bool(val); @@ -83,6 +97,20 @@ mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type) { #if MICROPY_EMIT_NATIVE +mp_obj_dict_t *mp_native_swap_globals(mp_obj_dict_t *new_globals) { + if (new_globals == NULL) { + // Globals were originally the same so 
don't restore them + return NULL; + } + mp_obj_dict_t *old_globals = mp_globals_get(); + if (old_globals == new_globals) { + // Don't set globals if they are the same, and return NULL to indicate this + return NULL; + } + mp_globals_set(new_globals); + return old_globals; +} + // wrapper that accepts n_args and n_kw in one argument // (native emitter can only pass at most 3 arguments to a function) mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, size_t n_args_kw, const mp_obj_t *args) { @@ -92,7 +120,7 @@ mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, size_t n_args_kw, const m // wrapper that makes raise obj and raises it // END_FINALLY opcode requires that we don't raise if o==None void mp_native_raise(mp_obj_t o) { - if (o != mp_const_none) { + if (o != MP_OBJ_NULL && o != mp_const_none) { nlr_raise(mp_make_raise_obj(o)); } } @@ -123,10 +151,50 @@ STATIC mp_obj_t mp_native_iternext(mp_obj_iter_buf_t *iter) { return mp_iternext(obj); } +STATIC bool mp_native_yield_from(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *ret_value) { + mp_vm_return_kind_t ret_kind; + nlr_buf_t nlr_buf; + mp_obj_t throw_value = *ret_value; + if (nlr_push(&nlr_buf) == 0) { + if (throw_value != MP_OBJ_NULL) { + send_value = MP_OBJ_NULL; + } + ret_kind = mp_resume(gen, send_value, throw_value, ret_value); + nlr_pop(); + } else { + ret_kind = MP_VM_RETURN_EXCEPTION; + *ret_value = nlr_buf.ret_val; + } + + if (ret_kind == MP_VM_RETURN_YIELD) { + return true; + } else if (ret_kind == MP_VM_RETURN_NORMAL) { + if (*ret_value == MP_OBJ_STOP_ITERATION) { + *ret_value = mp_const_none; + } + } else { + assert(ret_kind == MP_VM_RETURN_EXCEPTION); + if (!mp_obj_exception_match(*ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) { + nlr_raise(*ret_value); + } + *ret_value = mp_obj_exception_get_value(*ret_value); + } + + if (throw_value != MP_OBJ_NULL && mp_obj_exception_match(throw_value, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { + nlr_raise(mp_make_raise_obj(throw_value)); + } + + return false; +} + // these must correspond to the respective enum in runtime0.h -void *const mp_fun_table[MP_F_NUMBER_OF] = { - mp_convert_obj_to_native, - mp_convert_native_to_obj, +const void *const mp_fun_table[MP_F_NUMBER_OF] = { + &mp_const_none_obj, + &mp_const_false_obj, + &mp_const_true_obj, + mp_native_from_obj, + mp_native_to_obj, + mp_native_swap_globals, mp_load_name, mp_load_global, mp_load_build_class, @@ -146,8 +214,8 @@ void *const mp_fun_table[MP_F_NUMBER_OF] = { mp_obj_new_dict, mp_obj_dict_store, #if MICROPY_PY_BUILTINS_SET - mp_obj_new_set, mp_obj_set_store, + mp_obj_new_set, #endif mp_make_function_from_raw_code, mp_native_call_function_n_kw, @@ -170,9 +238,11 @@ void *const mp_fun_table[MP_F_NUMBER_OF] = { mp_delete_global, mp_obj_new_cell, mp_make_closure_from_raw_code, + mp_arg_check_num_sig, mp_setup_code_state, mp_small_int_floor_divide, mp_small_int_modulo, + mp_native_yield_from, }; /* diff --git a/python/src/py/nlrx86.c b/python/src/py/nlrx86.c index 23882cc30..59b97d8ee 100644 --- a/python/src/py/nlrx86.c +++ b/python/src/py/nlrx86.c @@ -39,15 +39,29 @@ unsigned int nlr_push_tail(nlr_buf_t *nlr) asm("nlr_push_tail"); __attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr); #endif +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 8 +// Since gcc 8.0 the naked attribute is supported +#define USE_NAKED (1) +#define UNDO_PRELUDE (0) +#elif defined(__ZEPHYR__) || defined(__ANDROID__) +// Zephyr and Android use a different calling convention by default +#define USE_NAKED (0) +#define 
UNDO_PRELUDE (0) +#else +#define USE_NAKED (0) +#define UNDO_PRELUDE (1) +#endif + +#if USE_NAKED +__attribute__((naked)) +#endif unsigned int nlr_push(nlr_buf_t *nlr) { + #if !USE_NAKED (void)nlr; + #endif __asm volatile ( - // Check for Zephyr, which uses a different calling convention - // by default. - // TODE: Better support for various x86 calling conventions - // (unfortunately, __attribute__((naked)) is not supported on x86). - #if !(defined(__ZEPHYR__) || defined(__ANDROID__)) + #if UNDO_PRELUDE "pop %ebp \n" // undo function's prelude #endif "mov 4(%esp), %edx \n" // load nlr_buf @@ -61,7 +75,9 @@ unsigned int nlr_push(nlr_buf_t *nlr) { "jmp nlr_push_tail \n" // do the rest in C ); + #if !USE_NAKED return 0; // needed to silence compiler warning + #endif } NORETURN void nlr_jump(void *val) { diff --git a/python/src/py/obj.c b/python/src/py/obj.c index a1de89a03..122f0ea62 100644 --- a/python/src/py/obj.c +++ b/python/src/py/obj.c @@ -38,9 +38,9 @@ #include "py/stream.h" // for mp_obj_print mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in) { - if (MP_OBJ_IS_SMALL_INT(o_in)) { + if (mp_obj_is_small_int(o_in)) { return (mp_obj_type_t*)&mp_type_int; - } else if (MP_OBJ_IS_QSTR(o_in)) { + } else if (mp_obj_is_qstr(o_in)) { return (mp_obj_type_t*)&mp_type_str; #if MICROPY_PY_BUILTINS_FLOAT } else if (mp_obj_is_float(o_in)) { @@ -112,7 +112,7 @@ bool mp_obj_is_true(mp_obj_t arg) { return 1; } else if (arg == mp_const_none) { return 0; - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { if (MP_OBJ_SMALL_INT_VALUE(arg) == 0) { return 0; } else { @@ -167,7 +167,7 @@ bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) { && !mp_obj_is_float(o1) #endif #if MICROPY_PY_BUILTINS_COMPLEX - && !MP_OBJ_IS_TYPE(o1, &mp_type_complex) + && !mp_obj_is_type(o1, &mp_type_complex) #endif ) { return true; @@ -177,8 +177,8 @@ bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) { } // fast path for small ints - if (MP_OBJ_IS_SMALL_INT(o1)) { - if (MP_OBJ_IS_SMALL_INT(o2)) { + if (mp_obj_is_small_int(o1)) { + if (mp_obj_is_small_int(o2)) { // both SMALL_INT, and not equal if we get here return false; } else { @@ -189,20 +189,20 @@ bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) { } // fast path for strings - if (MP_OBJ_IS_STR(o1)) { - if (MP_OBJ_IS_STR(o2)) { + if (mp_obj_is_str(o1)) { + if (mp_obj_is_str(o2)) { // both strings, use special function return mp_obj_str_equal(o1, o2); } else { // a string is never equal to anything else goto str_cmp_err; } - } else if (MP_OBJ_IS_STR(o2)) { + } else if (mp_obj_is_str(o2)) { // o1 is not a string (else caught above), so the objects are not equal str_cmp_err: #if MICROPY_PY_STR_BYTES_CMP_WARN - if (MP_OBJ_IS_TYPE(o1, &mp_type_bytes) || MP_OBJ_IS_TYPE(o2, &mp_type_bytes)) { - mp_warning("Comparison between bytes and str"); + if (mp_obj_is_type(o1, &mp_type_bytes) || mp_obj_is_type(o2, &mp_type_bytes)) { + mp_warning(MP_WARN_CAT(BytesWarning), "Comparison between bytes and str"); } #endif return false; @@ -230,22 +230,18 @@ mp_int_t mp_obj_get_int(mp_const_obj_t arg) { return 0; } else if (arg == mp_const_true) { return 1; - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { return MP_OBJ_SMALL_INT_VALUE(arg); - } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { + } else if (mp_obj_is_type(arg, &mp_type_int)) { return mp_obj_int_get_checked(arg); } else { - if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("can't convert to int"); - } else { - 
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "can't convert %s to int", mp_obj_get_type_str(arg))); - } + mp_obj_t res = mp_unary_op(MP_UNARY_OP_INT, (mp_obj_t)arg); + return mp_obj_int_get_checked(res); } } mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg) { - if (MP_OBJ_IS_INT(arg)) { + if (mp_obj_is_int(arg)) { return mp_obj_int_get_truncated(arg); } else { return mp_obj_get_int(arg); @@ -260,9 +256,9 @@ bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value) { *value = 0; } else if (arg == mp_const_true) { *value = 1; - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { *value = MP_OBJ_SMALL_INT_VALUE(arg); - } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { + } else if (mp_obj_is_type(arg, &mp_type_int)) { *value = mp_obj_int_get_checked(arg); } else { return false; @@ -278,10 +274,10 @@ bool mp_obj_get_float_maybe(mp_obj_t arg, mp_float_t *value) { val = 0; } else if (arg == mp_const_true) { val = 1; - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { val = MP_OBJ_SMALL_INT_VALUE(arg); #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE - } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { + } else if (mp_obj_is_type(arg, &mp_type_int)) { val = mp_obj_int_as_float_impl(arg); #endif } else if (mp_obj_is_float(arg)) { @@ -317,18 +313,18 @@ void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) { } else if (arg == mp_const_true) { *real = 1; *imag = 0; - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { *real = MP_OBJ_SMALL_INT_VALUE(arg); *imag = 0; #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE - } else if (MP_OBJ_IS_TYPE(arg, &mp_type_int)) { + } else if (mp_obj_is_type(arg, &mp_type_int)) { *real = mp_obj_int_as_float_impl(arg); *imag = 0; #endif } else if (mp_obj_is_float(arg)) { *real = mp_obj_float_get(arg); *imag = 0; - } else if (MP_OBJ_IS_TYPE(arg, &mp_type_complex)) { + } else if (mp_obj_is_type(arg, &mp_type_complex)) { mp_obj_complex_get(arg, real, imag); } else { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { @@ -344,16 +340,16 @@ void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) { // note: returned value in *items may point to the interior of a GC block void mp_obj_get_array(mp_obj_t o, size_t *len, mp_obj_t **items) { - if (MP_OBJ_IS_TYPE(o, &mp_type_tuple)) { + if (mp_obj_is_type(o, &mp_type_tuple)) { mp_obj_tuple_get(o, len, items); - } else if (MP_OBJ_IS_TYPE(o, &mp_type_list)) { + } else if (mp_obj_is_type(o, &mp_type_list)) { mp_obj_list_get(o, len, items); } else { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { mp_raise_TypeError("expected tuple/list"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "object '%s' is not a tuple or list", mp_obj_get_type_str(o))); + "object '%s' isn't a tuple or list", mp_obj_get_type_str(o))); } } } @@ -375,7 +371,7 @@ void mp_obj_get_array_fixed_n(mp_obj_t o, size_t len, mp_obj_t **items) { // is_slice determines whether the index is a slice index size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool is_slice) { mp_int_t i; - if (MP_OBJ_IS_SMALL_INT(index)) { + if (mp_obj_is_small_int(index)) { i = MP_OBJ_SMALL_INT_VALUE(index); } else if (!mp_obj_get_int_maybe(index, &i)) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { @@ -413,7 +409,7 @@ size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool mp_obj_t mp_obj_id(mp_obj_t o_in) { mp_int_t id = 
(mp_int_t)o_in; - if (!MP_OBJ_IS_OBJ(o_in)) { + if (!mp_obj_is_obj(o_in)) { return mp_obj_new_int(id); } else if (id >= 0) { // Many OSes and CPUs have affinity for putting "user" memories @@ -449,9 +445,9 @@ mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) { if ( #if !MICROPY_PY_BUILTINS_STR_UNICODE // It's simple - unicode is slow, non-unicode is fast - MP_OBJ_IS_STR(o_in) || + mp_obj_is_str(o_in) || #endif - MP_OBJ_IS_TYPE(o_in, &mp_type_bytes)) { + mp_obj_is_type(o_in, &mp_type_bytes)) { GET_STR_LEN(o_in, l); return MP_OBJ_NEW_SMALL_INT(l); } else { @@ -475,24 +471,24 @@ mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value) { } if (value == MP_OBJ_NULL) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("object does not support item deletion"); + mp_raise_TypeError("object doesn't support item deletion"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object does not support item deletion", mp_obj_get_type_str(base))); + "'%s' object doesn't support item deletion", mp_obj_get_type_str(base))); } } else if (value == MP_OBJ_SENTINEL) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("object is not subscriptable"); + mp_raise_TypeError("object isn't subscriptable"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not subscriptable", mp_obj_get_type_str(base))); + "'%s' object isn't subscriptable", mp_obj_get_type_str(base))); } } else { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("object does not support item assignment"); + mp_raise_TypeError("object doesn't support item assignment"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object does not support item assignment", mp_obj_get_type_str(base))); + "'%s' object doesn't support item assignment", mp_obj_get_type_str(base))); } } } diff --git a/python/src/py/obj.h b/python/src/py/obj.h index 95a94836e..a22d5f8b8 100644 --- a/python/src/py/obj.h +++ b/python/src/py/obj.h @@ -65,14 +65,14 @@ typedef struct _mp_obj_base_t mp_obj_base_t; // For debugging purposes they are all different. For non-debug mode, we alias // as many as we can to MP_OBJ_NULL because it's cheaper to load/compare 0. 
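// (Concretely, per the change below: with MICROPY_DEBUG_MP_OBJ_SENTINELS
// enabled the three sentinels keep distinct values 0, 4 and 8, so confusing
// MP_OBJ_STOP_ITERATION with MP_OBJ_NULL fails fast during debugging.)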
-#ifdef NDEBUG -#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0)) -#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)0)) -#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)4)) -#else +#if MICROPY_DEBUG_MP_OBJ_SENTINELS #define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0)) #define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)4)) #define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)8)) +#else +#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0)) +#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)0)) +#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)4)) #endif // These macros/inline functions operate on objects and depend on the @@ -81,12 +81,12 @@ typedef struct _mp_obj_base_t mp_obj_base_t; #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A -static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o) +static inline bool mp_obj_is_small_int(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 1) != 0); } #define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1) #define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1)) -static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) +static inline bool mp_obj_is_qstr(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 3) == 2); } #define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2) #define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 2)) @@ -97,22 +97,22 @@ static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) extern const struct _mp_obj_float_t mp_const_float_e_obj; extern const struct _mp_obj_float_t mp_const_float_pi_obj; -#define mp_obj_is_float(o) MP_OBJ_IS_TYPE((o), &mp_type_float) +#define mp_obj_is_float(o) mp_obj_is_type((o), &mp_type_float) mp_float_t mp_obj_float_get(mp_obj_t self_in); mp_obj_t mp_obj_new_float(mp_float_t value); #endif -static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o) +static inline bool mp_obj_is_obj(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 3) == 0); } #elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B -static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o) +static inline bool mp_obj_is_small_int(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 3) == 1); } #define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 2) #define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 2) | 1)) -static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) +static inline bool mp_obj_is_qstr(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 3) == 3); } #define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2) #define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 3)) @@ -123,21 +123,22 @@ static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) extern const struct _mp_obj_float_t mp_const_float_e_obj; extern const struct _mp_obj_float_t mp_const_float_pi_obj; -#define mp_obj_is_float(o) MP_OBJ_IS_TYPE((o), &mp_type_float) +#define mp_obj_is_float(o) mp_obj_is_type((o), &mp_type_float) mp_float_t mp_obj_float_get(mp_obj_t self_in); mp_obj_t mp_obj_new_float(mp_float_t value); #endif -static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o) +static inline bool mp_obj_is_obj(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 1) == 0); } #elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C -static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o) +static inline bool mp_obj_is_small_int(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 1) != 0); } #define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1) #define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1)) +#if MICROPY_PY_BUILTINS_FLOAT #define mp_const_float_e MP_ROM_PTR((mp_obj_t)(((0x402df854 & ~3) | 2) + 0x80800000)) #define mp_const_float_pi 
MP_ROM_PTR((mp_obj_t)(((0x40490fdb & ~3) | 2) + 0x80800000)) @@ -157,28 +158,34 @@ static inline mp_obj_t mp_obj_new_float(mp_float_t f) { } num = {.f = f}; return (mp_obj_t)(((num.u & ~0x3) | 2) + 0x80800000); } +#endif -static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) +static inline bool mp_obj_is_qstr(mp_const_obj_t o) { return (((mp_uint_t)(o)) & 0xff800007) == 0x00000006; } #define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3) #define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 0x00000006)) -static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o) +static inline bool mp_obj_is_obj(mp_const_obj_t o) { return ((((mp_int_t)(o)) & 3) == 0); } #elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D -static inline bool MP_OBJ_IS_SMALL_INT(mp_const_obj_t o) - { return ((((mp_int_t)(o)) & 0xffff000000000000) == 0x0001000000000000); } +static inline bool mp_obj_is_small_int(mp_const_obj_t o) + { return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0001000000000000); } #define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)((o) << 16)) >> 17) #define MP_OBJ_NEW_SMALL_INT(small_int) (((((uint64_t)(small_int)) & 0x7fffffffffff) << 1) | 0x0001000000000001) -static inline bool MP_OBJ_IS_QSTR(mp_const_obj_t o) - { return ((((mp_int_t)(o)) & 0xffff000000000000) == 0x0002000000000000); } +static inline bool mp_obj_is_qstr(mp_const_obj_t o) + { return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0002000000000000); } #define MP_OBJ_QSTR_VALUE(o) ((((uint32_t)(o)) >> 1) & 0xffffffff) #define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 1) | 0x0002000000000001)) #if MICROPY_PY_BUILTINS_FLOAT + +#if MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_DOUBLE +#error MICROPY_OBJ_REPR_D requires MICROPY_FLOAT_IMPL_DOUBLE +#endif + #define mp_const_float_e {((mp_obj_t)((uint64_t)0x4005bf0a8b145769 + 0x8004000000000000))} #define mp_const_float_pi {((mp_obj_t)((uint64_t)0x400921fb54442d18 + 0x8004000000000000))} @@ -201,7 +208,7 @@ static inline mp_obj_t mp_obj_new_float(mp_float_t f) { } #endif -static inline bool MP_OBJ_IS_OBJ(mp_const_obj_t o) +static inline bool mp_obj_is_obj(mp_const_obj_t o) { return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0000000000000000); } #define MP_OBJ_TO_PTR(o) ((void*)(uintptr_t)(o)) #define MP_OBJ_FROM_PTR(p) ((mp_obj_t)((uintptr_t)(p))) @@ -248,17 +255,6 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t; */ #endif -// The macros below are derived from the ones above and are used to -// check for more specific object types. -// Note: these are kept as macros because inline functions sometimes use much -// more code space than the equivalent macros, depending on the compiler. 
- -#define MP_OBJ_IS_TYPE(o, t) (MP_OBJ_IS_OBJ(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type == (t))) // this does not work for checking int, str or fun; use below macros for that -#define MP_OBJ_IS_INT(o) (MP_OBJ_IS_SMALL_INT(o) || MP_OBJ_IS_TYPE(o, &mp_type_int)) -#define MP_OBJ_IS_STR(o) (MP_OBJ_IS_QSTR(o) || MP_OBJ_IS_TYPE(o, &mp_type_str)) -#define MP_OBJ_IS_STR_OR_BYTES(o) (MP_OBJ_IS_QSTR(o) || (MP_OBJ_IS_OBJ(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->binary_op == mp_obj_str_binary_op)) -#define MP_OBJ_IS_FUN(o) (MP_OBJ_IS_OBJ(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function)) - // These macros are used to declare and define constant function objects // You can put "static" in front of the definitions to make them local @@ -270,6 +266,9 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t; #define MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name) extern const mp_obj_fun_builtin_var_t obj_name #define MP_DECLARE_CONST_FUN_OBJ_KW(obj_name) extern const mp_obj_fun_builtin_var_t obj_name +#define MP_OBJ_FUN_ARGS_MAX (0xffff) // to set maximum value in n_args_max below +#define MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, takes_kw) ((uint32_t)((((uint32_t)(n_args_min)) << 17) | (((uint32_t)(n_args_max)) << 1) | ((takes_kw) ? 1 : 0))) + #define MP_DEFINE_CONST_FUN_OBJ_0(obj_name, fun_name) \ const mp_obj_fun_builtin_fixed_t obj_name = \ {{&mp_type_fun_builtin_0}, .fun._0 = fun_name} @@ -284,13 +283,13 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t; {{&mp_type_fun_builtin_3}, .fun._3 = fun_name} #define MP_DEFINE_CONST_FUN_OBJ_VAR(obj_name, n_args_min, fun_name) \ const mp_obj_fun_builtin_var_t obj_name = \ - {{&mp_type_fun_builtin_var}, false, n_args_min, MP_OBJ_FUN_ARGS_MAX, .fun.var = fun_name} + {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, false), .fun.var = fun_name} #define MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name, n_args_min, n_args_max, fun_name) \ const mp_obj_fun_builtin_var_t obj_name = \ - {{&mp_type_fun_builtin_var}, false, n_args_min, n_args_max, .fun.var = fun_name} + {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, false), .fun.var = fun_name} #define MP_DEFINE_CONST_FUN_OBJ_KW(obj_name, n_args_min, fun_name) \ const mp_obj_fun_builtin_var_t obj_name = \ - {{&mp_type_fun_builtin_var}, true, n_args_min, MP_OBJ_FUN_ARGS_MAX, .fun.kw = fun_name} + {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, true), .fun.kw = fun_name} // These macros are used to define constant map/dict objects // You can put "static" in front of the definition to make it local @@ -327,6 +326,13 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t; #define MP_DEFINE_CONST_STATICMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_staticmethod}, fun_name} #define MP_DEFINE_CONST_CLASSMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_classmethod}, fun_name} +// Declare a module as a builtin, processed by makemoduledefs.py +// param module_name: MP_QSTR_<module name> +// param obj_module: mp_obj_module_t instance +// param enabled_define: used as `#if (enabled_define)` around entry + +#define MP_REGISTER_MODULE(module_name, obj_module, enabled_define) + // Underlying map/hash table implementation (not dict object or map function) typedef struct _mp_map_elem_t { @@ -363,7 +369,7 @@ typedef enum _mp_map_lookup_kind_t { extern const mp_map_t mp_const_empty_map; -static inline 
bool MP_MAP_SLOT_IS_FILLED(const mp_map_t *map, size_t pos) { return ((map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL); } +static inline bool mp_map_slot_is_filled(const mp_map_t *map, size_t pos) { return ((map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL); } void mp_map_init(mp_map_t *map, size_t n); void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table); @@ -382,7 +388,7 @@ typedef struct _mp_set_t { mp_obj_t *table; } mp_set_t; -static inline bool MP_SET_SLOT_IS_FILLED(const mp_set_t *set, size_t pos) { return ((set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL); } +static inline bool mp_set_slot_is_filled(const mp_set_t *set, size_t pos) { return ((set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL); } void mp_set_init(mp_set_t *set, size_t n); mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind); @@ -451,22 +457,15 @@ typedef struct _mp_buffer_p_t { bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags); void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags); -// Stream protocol -typedef struct _mp_stream_p_t { - // On error, functions should return MP_STREAM_ERROR and fill in *errcode (values - // are implementation-dependent, but will be exposed to user, e.g. via exception). - mp_uint_t (*read)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode); - mp_uint_t (*write)(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode); - mp_uint_t (*ioctl)(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode); - mp_uint_t is_text : 1; // default is bytes, set this for text stream -} mp_stream_p_t; - struct _mp_obj_type_t { // A type is an object so must start with this entry, which points to mp_type_type. mp_obj_base_t base; - // The name of this type. - qstr name; + // Flags associated with this type. + uint16_t flags; + + // The name of this type, a qstr. + uint16_t name; // Corresponds to __repr__ and __str__ special methods. mp_print_fun_t print; @@ -554,6 +553,8 @@ extern const mp_obj_type_t mp_type_slice; extern const mp_obj_type_t mp_type_zip; extern const mp_obj_type_t mp_type_array; extern const mp_obj_type_t mp_type_super; +extern const mp_obj_type_t mp_type_gen_wrap; +extern const mp_obj_type_t mp_type_native_gen_wrap; extern const mp_obj_type_t mp_type_gen_instance; extern const mp_obj_type_t mp_type_fun_builtin_0; extern const mp_obj_type_t mp_type_fun_builtin_1; @@ -620,6 +621,16 @@ extern const struct _mp_obj_exception_t mp_const_GeneratorExit_obj; // General API for objects +// These macros are derived from more primitive ones and are used to +// check for more specific object types. +// Note: these are kept as macros because inline functions sometimes use much +// more code space than the equivalent macros, depending on the compiler. 
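+// (eg mp_obj_is_type(o, &mp_type_list) works for a concrete type like list,
+// but small ints and interned strings are unboxed values with no type
+// pointer, hence the dedicated mp_obj_is_int/mp_obj_is_str macros below)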
+#define mp_obj_is_type(o, t) (mp_obj_is_obj(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type == (t))) // this does not work for checking int, str or fun; use below macros for that +#define mp_obj_is_int(o) (mp_obj_is_small_int(o) || mp_obj_is_type(o, &mp_type_int)) +#define mp_obj_is_str(o) (mp_obj_is_qstr(o) || mp_obj_is_type(o, &mp_type_str)) +#define mp_obj_is_str_or_bytes(o) (mp_obj_is_qstr(o) || (mp_obj_is_obj(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->binary_op == mp_obj_str_binary_op)) +#define mp_obj_is_fun(o) (mp_obj_is_obj(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function)) + mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict); static inline mp_obj_t mp_obj_new_bool(mp_int_t x) { return x ? mp_const_true : mp_const_false; } mp_obj_t mp_obj_new_cell(mp_obj_t obj); @@ -645,8 +656,7 @@ mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!) mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table); mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table); -mp_obj_t mp_obj_new_fun_viper(size_t n_args, void *fun_data, mp_uint_t type_sig); -mp_obj_t mp_obj_new_fun_asm(size_t n_args, void *fun_data, mp_uint_t type_sig); +mp_obj_t mp_obj_new_fun_asm(size_t n_args, const void *fun_data, mp_uint_t type_sig); mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun); mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed, const mp_obj_t *closed); mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items); @@ -672,7 +682,7 @@ bool mp_obj_is_true(mp_obj_t arg); bool mp_obj_is_callable(mp_obj_t o_in); bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2); -static inline bool mp_obj_is_integer(mp_const_obj_t o) { return MP_OBJ_IS_INT(o) || MP_OBJ_IS_TYPE(o, &mp_type_bool); } // returns true if o is bool, small int or long int +static inline bool mp_obj_is_integer(mp_const_obj_t o) { return mp_obj_is_int(o) || mp_obj_is_type(o, &mp_type_bool); } // returns true if o is bool, small int or long int mp_int_t mp_obj_get_int(mp_const_obj_t arg); mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg); bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value); @@ -762,7 +772,9 @@ size_t mp_obj_dict_len(mp_obj_t self_in); mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index); mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value); mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key); -mp_map_t *mp_obj_dict_get_map(mp_obj_t self_in); +static inline mp_map_t *mp_obj_dict_get_map(mp_obj_t dict) { + return &((mp_obj_dict_t*)MP_OBJ_TO_PTR(dict))->map; +} // set void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item); @@ -782,12 +794,9 @@ typedef struct _mp_obj_fun_builtin_fixed_t { } fun; } mp_obj_fun_builtin_fixed_t; -#define MP_OBJ_FUN_ARGS_MAX (0xffff) // to set maximum value in n_args_max below typedef struct _mp_obj_fun_builtin_var_t { mp_obj_base_t base; - bool is_kw : 1; - mp_uint_t n_args_min : 15; // inclusive - mp_uint_t n_args_max : 16; // inclusive + uint32_t sig; // see MP_OBJ_FUN_MAKE_SIG union { mp_fun_var_t var; mp_fun_kw_t kw; @@ -806,7 +815,9 @@ typedef struct _mp_obj_module_t { mp_obj_base_t base; mp_obj_dict_t *globals; } mp_obj_module_t; -mp_obj_dict_t 
*mp_obj_module_get_globals(mp_obj_t self_in); +static inline mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t module) { + return ((mp_obj_module_t*)MP_OBJ_TO_PTR(module))->globals; +} // check if given module object is a package bool mp_obj_is_package(mp_obj_t module); @@ -858,4 +869,16 @@ mp_obj_t mp_seq_extract_slice(size_t len, const mp_obj_t *seq, mp_bound_slice_t memmove(((char*)dest) + (beg + slice_len) * (item_sz), ((char*)dest) + (end) * (item_sz), ((dest_len) + (len_adj) - ((beg) + (slice_len))) * (item_sz)); \ memmove(((char*)dest) + (beg) * (item_sz), slice, slice_len * (item_sz)); +// Provide translation for legacy API +#define MP_OBJ_IS_SMALL_INT mp_obj_is_small_int +#define MP_OBJ_IS_QSTR mp_obj_is_qstr +#define MP_OBJ_IS_OBJ mp_obj_is_obj +#define MP_OBJ_IS_INT mp_obj_is_int +#define MP_OBJ_IS_TYPE mp_obj_is_type +#define MP_OBJ_IS_STR mp_obj_is_str +#define MP_OBJ_IS_STR_OR_BYTES mp_obj_is_str_or_bytes +#define MP_OBJ_IS_FUN mp_obj_is_fun +#define MP_MAP_SLOT_IS_FILLED mp_map_slot_is_filled +#define MP_SET_SLOT_IS_FILLED mp_set_slot_is_filled + #endif // MICROPY_INCLUDED_PY_OBJ_H diff --git a/python/src/py/objarray.c b/python/src/py/objarray.c index a35539484..4e58d8e5d 100644 --- a/python/src/py/objarray.c +++ b/python/src/py/objarray.c @@ -50,11 +50,14 @@ // Note that we don't handle the case where the original buffer might change // size due to a resize of the original parent object. -// make (& TYPECODE_MASK) a null operation if memorview not enabled #if MICROPY_PY_BUILTINS_MEMORYVIEW #define TYPECODE_MASK (0x7f) +#define memview_offset free #else +// make (& TYPECODE_MASK) a null operation if memoryview not enabled #define TYPECODE_MASK (~(size_t)0) +// memview_offset should not be accessed if memoryview is not enabled, +// so not defined to catch errors #endif STATIC mp_obj_t array_iterator_new(mp_obj_t array_in, mp_obj_iter_buf_t *iter_buf); @@ -116,8 +119,8 @@ STATIC mp_obj_t array_construct(char typecode, mp_obj_t initializer) { if (((MICROPY_PY_BUILTINS_BYTEARRAY && typecode == BYTEARRAY_TYPECODE) || (MICROPY_PY_ARRAY - && (MP_OBJ_IS_TYPE(initializer, &mp_type_bytes) - || (MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(initializer, &mp_type_bytearray))))) + && (mp_obj_is_type(initializer, &mp_type_bytes) - || (MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(initializer, &mp_type_bytearray))))) && mp_get_buffer(initializer, &bufinfo, MP_BUFFER_READ)) { // construct array from raw bytes // we round-down the len to make it a multiple of sz (CPython raises error) @@ -175,12 +178,13 @@ STATIC mp_obj_t array_make_new(const mp_obj_type_t *type_in, size_t n_args, size #if MICROPY_PY_BUILTINS_BYTEARRAY STATIC mp_obj_t bytearray_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { (void)type_in; - mp_arg_check_num(n_args, n_kw, 0, 1, false); + // Can take 2nd/3rd arg if constructing from str + mp_arg_check_num(n_args, n_kw, 0, 3, false); if (n_args == 0) { // no args: construct an empty bytearray return MP_OBJ_FROM_PTR(array_new(BYTEARRAY_TYPECODE, 0)); - } else if (MP_OBJ_IS_INT(args[0])) { + } else if (mp_obj_is_int(args[0])) { // 1 arg, an integer: construct a blank bytearray of that length mp_uint_t len = mp_obj_get_int(args[0]); mp_obj_array_t *o = array_new(BYTEARRAY_TYPECODE, len); @@ -199,7 +203,7 @@ mp_obj_t mp_obj_new_memoryview(byte typecode, size_t nitems, void *items) { mp_obj_array_t *self = m_new_obj(mp_obj_array_t); self->base.type = &mp_type_memoryview; self->typecode = typecode; - self->free = 0; + self->memview_offset 
= 0; self->len = nitems; self->items = items; return MP_OBJ_FROM_PTR(self); @@ -222,11 +226,24 @@ STATIC mp_obj_t memoryview_make_new(const mp_obj_type_t *type_in, size_t n_args, // test if the object can be written to if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_RW)) { - self->typecode |= 0x80; // used to indicate writable buffer + self->typecode |= MP_OBJ_ARRAY_TYPECODE_FLAG_RW; // indicate writable buffer } return MP_OBJ_FROM_PTR(self); } + +#if MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE +STATIC void memoryview_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { + if (dest[0] != MP_OBJ_NULL) { + return; + } + if (attr == MP_QSTR_itemsize) { + mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in); + dest[0] = MP_OBJ_NEW_SMALL_INT(mp_binary_get_size('@', self->typecode & TYPECODE_MASK, NULL)); + } +} +#endif + #endif STATIC mp_obj_t array_unary_op(mp_unary_op_t op, mp_obj_t o_in) { @@ -270,21 +287,22 @@ STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs } case MP_BINARY_OP_CONTAINS: { + #if MICROPY_PY_BUILTINS_BYTEARRAY + // Can search string only in bytearray mp_buffer_info_t lhs_bufinfo; mp_buffer_info_t rhs_bufinfo; - - // Can search string only in bytearray if (mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) { - if (!MP_OBJ_IS_TYPE(lhs_in, &mp_type_bytearray)) { + if (!mp_obj_is_type(lhs_in, &mp_type_bytearray)) { return mp_const_false; } array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ); return mp_obj_new_bool( find_subbytes(lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len, 1) != NULL); } + #endif // Otherwise, can only look for a scalar numeric value in an array - if (MP_OBJ_IS_INT(rhs_in) || mp_obj_is_float(rhs_in)) { + if (mp_obj_is_int(rhs_in) || mp_obj_is_float(rhs_in)) { mp_raise_NotImplementedError(NULL); } @@ -309,8 +327,8 @@ STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs #if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg) { // self is not a memoryview, so we don't need to use (& TYPECODE_MASK) - assert((MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_bytearray)) - || (MICROPY_PY_ARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_array))); + assert((MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(self_in, &mp_type_bytearray)) + || (MICROPY_PY_ARRAY && mp_obj_is_type(self_in, &mp_type_array))); mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in); if (self->free == 0) { @@ -330,8 +348,8 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_2(array_append_obj, array_append); STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in) { // self is not a memoryview, so we don't need to use (& TYPECODE_MASK) - assert((MICROPY_PY_BUILTINS_BYTEARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_bytearray)) - || (MICROPY_PY_ARRAY && MP_OBJ_IS_TYPE(self_in, &mp_type_array))); + assert((MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(self_in, &mp_type_bytearray)) + || (MICROPY_PY_ARRAY && mp_obj_is_type(self_in, &mp_type_array))); mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in); // allow to extend by anything that has the buffer protocol (extension to CPython) @@ -370,9 +388,8 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value return MP_OBJ_NULL; // op not supported } else { mp_obj_array_t *o = MP_OBJ_TO_PTR(self_in); - if (0) { #if MICROPY_PY_BUILTINS_SLICE - } else if (MP_OBJ_IS_TYPE(index_in, &mp_type_slice)) { + if (mp_obj_is_type(index_in, &mp_type_slice)) { mp_bound_slice_t slice; if (!mp_seq_get_fast_slice_indexes(o->len, 
index_in, &slice)) { mp_raise_NotImplementedError("only slices with step=1 (aka None) are supported"); @@ -383,7 +400,7 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value size_t src_len; void *src_items; size_t item_sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL); - if (MP_OBJ_IS_OBJ(value) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(value))->type->subscr == array_subscr) { + if (mp_obj_is_obj(value) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(value))->type->subscr == array_subscr) { // value is array, bytearray or memoryview mp_obj_array_t *src_slice = MP_OBJ_TO_PTR(value); if (item_sz != mp_binary_get_size('@', src_slice->typecode & TYPECODE_MASK, NULL)) { @@ -393,11 +410,11 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value src_len = src_slice->len; src_items = src_slice->items; #if MICROPY_PY_BUILTINS_MEMORYVIEW - if (MP_OBJ_IS_TYPE(value, &mp_type_memoryview)) { - src_items = (uint8_t*)src_items + (src_slice->free * item_sz); + if (mp_obj_is_type(value, &mp_type_memoryview)) { + src_items = (uint8_t*)src_items + (src_slice->memview_offset * item_sz); } #endif - } else if (MP_OBJ_IS_TYPE(value, &mp_type_bytes)) { + } else if (mp_obj_is_type(value, &mp_type_bytes)) { if (item_sz != 1) { goto compat_error; } @@ -414,14 +431,14 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value uint8_t* dest_items = o->items; #if MICROPY_PY_BUILTINS_MEMORYVIEW if (o->base.type == &mp_type_memoryview) { - if ((o->typecode & 0x80) == 0) { + if (!(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW)) { // store to read-only memoryview not allowed return MP_OBJ_NULL; } if (len_adj != 0) { goto compat_error; } - dest_items += o->free * item_sz; + dest_items += o->memview_offset * item_sz; } #endif if (len_adj > 0) { @@ -451,27 +468,27 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value mp_obj_array_t *res; size_t sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL); assert(sz > 0); - if (0) { - // dummy #if MICROPY_PY_BUILTINS_MEMORYVIEW - } else if (o->base.type == &mp_type_memoryview) { + if (o->base.type == &mp_type_memoryview) { res = m_new_obj(mp_obj_array_t); *res = *o; - res->free += slice.start; + res->memview_offset += slice.start; res->len = slice.stop - slice.start; + } else #endif - } else { + { res = array_new(o->typecode, slice.stop - slice.start); memcpy(res->items, (uint8_t*)o->items + slice.start * sz, (slice.stop - slice.start) * sz); } return MP_OBJ_FROM_PTR(res); + } else #endif - } else { + { size_t index = mp_get_index(o->base.type, o->len, index_in, false); #if MICROPY_PY_BUILTINS_MEMORYVIEW if (o->base.type == &mp_type_memoryview) { - index += o->free; - if (value != MP_OBJ_SENTINEL && (o->typecode & 0x80) == 0) { + index += o->memview_offset; + if (value != MP_OBJ_SENTINEL && !(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW)) { // store to read-only memoryview return MP_OBJ_NULL; } @@ -497,11 +514,11 @@ STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_ui bufinfo->typecode = o->typecode & TYPECODE_MASK; #if MICROPY_PY_BUILTINS_MEMORYVIEW if (o->base.type == &mp_type_memoryview) { - if ((o->typecode & 0x80) == 0 && (flags & MP_BUFFER_WRITE)) { + if (!(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW) && (flags & MP_BUFFER_WRITE)) { // read-only memoryview return 1; } - bufinfo->buf = (uint8_t*)bufinfo->buf + (size_t)o->free * sz; + bufinfo->buf = (uint8_t*)bufinfo->buf + (size_t)o->memview_offset * sz; } #else (void)flags; @@ -513,6 +530,9 
@@ STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_ui STATIC const mp_rom_map_elem_t array_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&array_append_obj) }, { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&array_extend_obj) }, + #if MICROPY_CPYTHON_COMPAT + { MP_ROM_QSTR(MP_QSTR_decode), MP_ROM_PTR(&bytes_decode_obj) }, + #endif }; STATIC MP_DEFINE_CONST_DICT(array_locals_dict, array_locals_dict_table); @@ -556,6 +576,9 @@ const mp_obj_type_t mp_type_memoryview = { .getiter = array_iterator_new, .unary_op = array_unary_op, .binary_op = array_binary_op, + #if MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE + .attr = memoryview_attr, + #endif .subscr = array_subscr, .buffer_p = { .get_buffer = array_get_buffer }, }; @@ -622,7 +645,7 @@ STATIC mp_obj_t array_iterator_new(mp_obj_t array_in, mp_obj_iter_buf_t *iter_bu o->cur = 0; #if MICROPY_PY_BUILTINS_MEMORYVIEW if (array->base.type == &mp_type_memoryview) { - o->offset = array->free; + o->offset = array->memview_offset; } #endif return MP_OBJ_FROM_PTR(o); diff --git a/python/src/py/objarray.h b/python/src/py/objarray.h index 038966845..2fb6e2c91 100644 --- a/python/src/py/objarray.h +++ b/python/src/py/objarray.h @@ -29,11 +29,21 @@ #include "py/obj.h" +// Used only for memoryview types, set in "typecode" to indicate a writable memoryview +#define MP_OBJ_ARRAY_TYPECODE_FLAG_RW (0x80) + +// This structure is used for all of bytearray, array.array and memoryview +// objects. Note that some fields have a different meaning for memoryview; +// see the comment at the beginning of objarray.c. typedef struct _mp_obj_array_t { mp_obj_base_t base; size_t typecode : 8; // free is number of unused elements after len used elements // alloc size = len + free + // But for memoryview, 'free' is reused as the offset (in elements) into the + // parent object. (A union is not used here, to avoid the complications of + // unions of bitfields across different toolchains.) See comments in + // objarray.c.
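+ // Illustrative example (assumes 4-byte 'i' items; values are not normative):
+ // memoryview(array('i', range(10)))[2:5] is stored as an mp_obj_array_t with
+ // typecode 'i', memview_offset = 2, len = 3 and items aliasing the parent's
+ // buffer, so element 0 of the view lives at (uint8_t*)items + 2 * 4.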
size_t free : (8 * sizeof(size_t) - 8); size_t len; // in elements void *items; diff --git a/python/src/py/objboundmeth.c b/python/src/py/objboundmeth.c index b0df6a68a..8fc44f163 100644 --- a/python/src/py/objboundmeth.c +++ b/python/src/py/objboundmeth.c @@ -89,10 +89,9 @@ STATIC void bound_meth_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { // not load attribute return; } - if (attr == MP_QSTR___name__) { - mp_obj_bound_meth_t *o = MP_OBJ_TO_PTR(self_in); - dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(o->meth)); - } + // Delegate the load to the method object + mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in); + mp_load_method_maybe(self->meth, attr, dest); } #endif diff --git a/python/src/py/objcomplex.c b/python/src/py/objcomplex.c index 409d65666..bf6fb51dc 100644 --- a/python/src/py/objcomplex.c +++ b/python/src/py/objcomplex.c @@ -79,12 +79,12 @@ STATIC mp_obj_t complex_make_new(const mp_obj_type_t *type_in, size_t n_args, si return mp_obj_new_complex(0, 0); case 1: - if (MP_OBJ_IS_STR(args[0])) { + if (mp_obj_is_str(args[0])) { // a string, parse it size_t l; const char *s = mp_obj_str_get_data(args[0], &l); return mp_parse_num_decimal(s, l, true, true, NULL); - } else if (MP_OBJ_IS_TYPE(args[0], &mp_type_complex)) { + } else if (mp_obj_is_type(args[0], &mp_type_complex)) { // a complex, just return it return args[0]; } else { @@ -95,13 +95,13 @@ STATIC mp_obj_t complex_make_new(const mp_obj_type_t *type_in, size_t n_args, si case 2: default: { mp_float_t real, imag; - if (MP_OBJ_IS_TYPE(args[0], &mp_type_complex)) { + if (mp_obj_is_type(args[0], &mp_type_complex)) { mp_obj_complex_get(args[0], &real, &imag); } else { real = mp_obj_get_float(args[0]); imag = 0; } - if (MP_OBJ_IS_TYPE(args[1], &mp_type_complex)) { + if (mp_obj_is_type(args[1], &mp_type_complex)) { mp_float_t real2, imag2; mp_obj_complex_get(args[1], &real2, &imag2); real -= imag2; @@ -164,7 +164,7 @@ mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag) { } void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_complex)); + assert(mp_obj_is_type(self_in, &mp_type_complex)); mp_obj_complex_t *self = MP_OBJ_TO_PTR(self_in); *real = self->real; *imag = self->imag; @@ -195,13 +195,13 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo } case MP_BINARY_OP_FLOOR_DIVIDE: case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE: - mp_raise_TypeError("can't do truncated division of a complex number"); + mp_raise_TypeError("can't truncate-divide a complex number"); case MP_BINARY_OP_TRUE_DIVIDE: case MP_BINARY_OP_INPLACE_TRUE_DIVIDE: if (rhs_imag == 0) { if (rhs_real == 0) { - mp_raise_msg(&mp_type_ZeroDivisionError, "complex division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "complex divide by zero"); } lhs_real /= rhs_real; lhs_imag /= rhs_real; diff --git a/python/src/py/objdict.c b/python/src/py/objdict.c index c0647067a..0a223f731 100644 --- a/python/src/py/objdict.c +++ b/python/src/py/objdict.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George + * Copyright (c) 2014-2017 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -31,7 +32,7 @@ #include "py/builtin.h" #include "py/objtype.h" -#define MP_OBJ_IS_DICT_TYPE(o) (MP_OBJ_IS_OBJ(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->make_new == dict_make_new) +#define mp_obj_is_dict_type(o) (mp_obj_is_obj(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->make_new == dict_make_new) STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs); @@ -43,7 +44,7 @@ STATIC mp_map_elem_t *dict_iter_next(mp_obj_dict_t *dict, size_t *cur) { mp_map_t *map = &dict->map; for (size_t i = *cur; i < max; i++) { - if (MP_MAP_SLOT_IS_FILLED(map, i)) { + if (mp_map_slot_is_filled(map, i)) { *cur = i + 1; return &(map->table[i]); } @@ -121,7 +122,7 @@ STATIC mp_obj_t dict_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_ } case MP_BINARY_OP_EQUAL: { #if MICROPY_PY_COLLECTIONS_ORDEREDDICT - if (MP_UNLIKELY(MP_OBJ_IS_TYPE(lhs_in, &mp_type_ordereddict) && MP_OBJ_IS_TYPE(rhs_in, &mp_type_ordereddict))) { + if (MP_UNLIKELY(mp_obj_is_type(lhs_in, &mp_type_ordereddict) && mp_obj_is_type(rhs_in, &mp_type_ordereddict))) { // Iterate through both dictionaries simultaneously and compare keys and values. mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in); size_t c1 = 0, c2 = 0; @@ -134,7 +135,7 @@ STATIC mp_obj_t dict_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_ return e1 == NULL && e2 == NULL ? mp_const_true : mp_const_false; } else #endif - if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_dict)) { + if (mp_obj_is_type(rhs_in, &mp_type_dict)) { mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in); if (o->map.used != rhs->map.used) { return mp_const_false; @@ -160,7 +161,7 @@ STATIC mp_obj_t dict_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_ } } -// TODO: Make sure this is inlined in dict_subscr() below. +// Note: Make sure this is inlined in the load part of dict_subscr() below.
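+// (For example, a load like d[k] goes dict_subscr() -> mp_obj_dict_get() ->
+// mp_map_lookup(), so an extra call frame here would be paid on every
+// dictionary access.)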
mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index) { mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); mp_map_elem_t *elem = mp_map_lookup(&self->map, index, MP_MAP_LOOKUP); @@ -202,7 +203,7 @@ STATIC void mp_ensure_not_fixed(const mp_obj_dict_t *dict) { } STATIC mp_obj_t dict_clear(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); mp_ensure_not_fixed(self); @@ -213,7 +214,7 @@ STATIC mp_obj_t dict_clear(mp_obj_t self_in) { STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_clear_obj, dict_clear); STATIC mp_obj_t dict_copy(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); mp_obj_t other_out = mp_obj_new_dict(self->map.alloc); mp_obj_dict_t *other = MP_OBJ_TO_PTR(other_out); @@ -227,6 +228,7 @@ STATIC mp_obj_t dict_copy(mp_obj_t self_in) { } STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_copy_obj, dict_copy); +#if MICROPY_PY_BUILTINS_DICT_FROMKEYS // this is a classmethod STATIC mp_obj_t dict_fromkeys(size_t n_args, const mp_obj_t *args) { mp_obj_t iter = mp_getiter(args[1], NULL); @@ -256,9 +258,10 @@ STATIC mp_obj_t dict_fromkeys(size_t n_args, const mp_obj_t *args) { } STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_fromkeys_fun_obj, 2, 3, dict_fromkeys); STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(dict_fromkeys_obj, MP_ROM_PTR(&dict_fromkeys_fun_obj)); +#endif STATIC mp_obj_t dict_get_helper(size_t n_args, const mp_obj_t *args, mp_map_lookup_kind_t lookup_kind) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(args[0])); + mp_check_self(mp_obj_is_dict_type(args[0])); mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]); if (lookup_kind != MP_MAP_LOOKUP) { mp_ensure_not_fixed(self); @@ -303,7 +306,7 @@ STATIC mp_obj_t dict_setdefault(size_t n_args, const mp_obj_t *args) { STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_setdefault_obj, 2, 3, dict_setdefault); STATIC mp_obj_t dict_popitem(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); mp_ensure_not_fixed(self); size_t cur = 0; @@ -322,7 +325,7 @@ STATIC mp_obj_t dict_popitem(mp_obj_t self_in) { STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_popitem_obj, dict_popitem); STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(args[0])); + mp_check_self(mp_obj_is_dict_type(args[0])); mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]); mp_ensure_not_fixed(self); @@ -331,7 +334,7 @@ STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwarg if (n_args == 2) { // given a positional argument - if (MP_OBJ_IS_DICT_TYPE(args[1])) { + if (mp_obj_is_dict_type(args[1])) { // update from other dictionary (make sure other is not self) if (args[1] != args[0]) { size_t cur = 0; @@ -362,7 +365,7 @@ STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwarg // update the dict with any keyword args for (size_t i = 0; i < kwargs->alloc; i++) { - if (MP_MAP_SLOT_IS_FILLED(kwargs, i)) { + if (mp_map_slot_is_filled(kwargs, i)) { mp_map_lookup(&self->map, kwargs->table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = kwargs->table[i].value; } } @@ -400,7 +403,7 @@ typedef struct _mp_obj_dict_view_t { } mp_obj_dict_view_t; STATIC mp_obj_t dict_view_it_iternext(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &dict_view_it_type)); + mp_check_self(mp_obj_is_type(self_in, 
&dict_view_it_type)); mp_obj_dict_view_it_t *self = MP_OBJ_TO_PTR(self_in); mp_map_elem_t *next = dict_iter_next(MP_OBJ_TO_PTR(self->dict), &self->cur); @@ -430,7 +433,7 @@ STATIC const mp_obj_type_t dict_view_it_type = { STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in, mp_obj_iter_buf_t *iter_buf) { assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t)); - mp_check_self(MP_OBJ_IS_TYPE(view_in, &dict_view_type)); + mp_check_self(mp_obj_is_type(view_in, &dict_view_type)); mp_obj_dict_view_t *view = MP_OBJ_TO_PTR(view_in); mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t*)iter_buf; o->base.type = &dict_view_it_type; @@ -442,7 +445,7 @@ STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in, mp_obj_iter_buf_t *iter_buf) STATIC void dict_view_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) { (void)kind; - mp_check_self(MP_OBJ_IS_TYPE(self_in, &dict_view_type)); + mp_check_self(mp_obj_is_type(self_in, &dict_view_type)); mp_obj_dict_view_t *self = MP_OBJ_TO_PTR(self_in); bool first = true; mp_print_str(print, mp_dict_view_names[self->kind]); @@ -489,7 +492,7 @@ STATIC mp_obj_t mp_obj_new_dict_view(mp_obj_t dict, mp_dict_view_kind_t kind) { } STATIC mp_obj_t dict_view(mp_obj_t self_in, mp_dict_view_kind_t kind) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); return mp_obj_new_dict_view(self_in, kind); } @@ -513,7 +516,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_values_obj, dict_values); STATIC mp_obj_t dict_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf) { assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t)); - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t*)iter_buf; o->base.type = &dict_view_it_type; o->kind = MP_DICT_VIEW_KEYS; @@ -528,7 +531,9 @@ STATIC mp_obj_t dict_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf) { STATIC const mp_rom_map_elem_t dict_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&dict_clear_obj) }, { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&dict_copy_obj) }, + #if MICROPY_PY_BUILTINS_DICT_FROMKEYS { MP_ROM_QSTR(MP_QSTR_fromkeys), MP_ROM_PTR(&dict_fromkeys_obj) }, + #endif { MP_ROM_QSTR(MP_QSTR_get), MP_ROM_PTR(&dict_get_obj) }, { MP_ROM_QSTR(MP_QSTR_items), MP_ROM_PTR(&dict_items_obj) }, { MP_ROM_QSTR(MP_QSTR_keys), MP_ROM_PTR(&dict_keys_obj) }, @@ -588,7 +593,7 @@ size_t mp_obj_dict_len(mp_obj_t self_in) { } mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); + mp_check_self(mp_obj_is_dict_type(self_in)); mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); mp_ensure_not_fixed(self); mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value; @@ -600,9 +605,3 @@ mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key) { dict_get_helper(2, args, MP_MAP_LOOKUP_REMOVE_IF_FOUND); return self_in; } - -mp_map_t *mp_obj_dict_get_map(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_DICT_TYPE(self_in)); - mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in); - return &self->map; -} diff --git a/python/src/py/objenumerate.c b/python/src/py/objenumerate.c index 1a9d30f83..493e45c2a 100644 --- a/python/src/py/objenumerate.c +++ b/python/src/py/objenumerate.c @@ -78,7 +78,7 @@ const mp_obj_type_t mp_type_enumerate = { }; STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_enumerate)); + assert(mp_obj_is_type(self_in, &mp_type_enumerate)); 
mp_obj_enumerate_t *self = MP_OBJ_TO_PTR(self_in); mp_obj_t next = mp_iternext(self->iter); if (next == MP_OBJ_STOP_ITERATION) { diff --git a/python/src/py/objexcept.c b/python/src/py/objexcept.c index 1e746bc81..1fb636f66 100644 --- a/python/src/py/objexcept.c +++ b/python/src/py/objexcept.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014-2016 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -40,12 +41,23 @@ // Number of items per traceback entry (file, line, block) #define TRACEBACK_ENTRY_LEN (3) -// Number of traceback entries to reserve in the emergency exception buffer -#define EMG_TRACEBACK_ALLOC (2 * TRACEBACK_ENTRY_LEN) - -// Optionally allocated buffer for storing the first argument of an exception -// allocated when the heap is locked. +// Optionally allocated buffer for storing some traceback, the tuple argument, +// and possible string object and data, for when the heap is locked. #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF + +// When used the layout of the emergency exception buffer is: +// - traceback entry (file, line, block) +// - traceback entry (file, line, block) +// - mp_obj_tuple_t object +// - n_args * mp_obj_t for tuple +// - mp_obj_str_t object +// - string data +#define EMG_BUF_TRACEBACK_OFFSET (0) +#define EMG_BUF_TRACEBACK_SIZE (2 * TRACEBACK_ENTRY_LEN * sizeof(size_t)) +#define EMG_BUF_TUPLE_OFFSET (EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE) +#define EMG_BUF_TUPLE_SIZE(n_args) (sizeof(mp_obj_tuple_t) + n_args * sizeof(mp_obj_t)) +#define EMG_BUF_STR_OFFSET (EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(1)) + # if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0 #define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE @@ -112,7 +124,7 @@ void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kin } else if (o->args->len == 1) { #if MICROPY_PY_UERRNO // try to provide a nice OSError error message - if (o->base.type == &mp_type_OSError && MP_OBJ_IS_SMALL_INT(o->args->items[0])) { + if (o->base.type == &mp_type_OSError && mp_obj_is_small_int(o->args->items[0])) { qstr qst = mp_errno_to_str(o->args->items[0]); if (qst != MP_QSTR_NULL) { mp_printf(print, "[Errno " INT_FMT "] %q", MP_OBJ_SMALL_INT_VALUE(o->args->items[0]), qst); @@ -153,9 +165,9 @@ mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, siz // reserved room (after the traceback data) for a tuple with 1 element. // Otherwise we are free to use the whole buffer after the traceback data. 
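+ // As a concrete illustration (assuming a 32-bit port with 4-byte size_t and
+ // mp_obj_t, and an 8-byte mp_obj_tuple_t header; sizes vary per port): the
+ // traceback area occupies bytes 0-23, so EMG_BUF_TUPLE_OFFSET is 24 and a
+ // tuple of n_args items needs a further 8 + 4 * n_args bytes.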
if (o_tuple == NULL && mp_emergency_exception_buf_size >= - EMG_TRACEBACK_ALLOC * sizeof(size_t) + sizeof(mp_obj_tuple_t) + n_args * sizeof(mp_obj_t)) { + EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(n_args)) { o_tuple = (mp_obj_tuple_t*) - ((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + EMG_TRACEBACK_ALLOC * sizeof(size_t)); + ((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_TUPLE_OFFSET); } #endif @@ -313,7 +325,35 @@ mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, size_t n_args, } mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg) { - return mp_obj_new_exception_msg_varg(exc_type, msg); + // Check that the given type is an exception type + assert(exc_type->make_new == mp_obj_exception_make_new); + + // Try to allocate memory for the message + mp_obj_str_t *o_str = m_new_obj_maybe(mp_obj_str_t); + + #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF + // If memory allocation failed and there is an emergency buffer then try to use + // that buffer to store the string object, reserving room at the start for the + // traceback and 1-tuple. + if (o_str == NULL + && mp_emergency_exception_buf_size >= EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t)) { + o_str = (mp_obj_str_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + + EMG_BUF_STR_OFFSET); + } + #endif + + if (o_str == NULL) { + // No memory for the string object so create the exception with no args + return mp_obj_exception_make_new(exc_type, 0, 0, NULL); + } + + // Create the string object and call mp_obj_exception_make_new to create the exception + o_str->base.type = &mp_type_str; + o_str->len = strlen(msg); + o_str->data = (const byte*)msg; + o_str->hash = qstr_compute_hash(o_str->data, o_str->len); + mp_obj_t arg = MP_OBJ_FROM_PTR(o_str); + return mp_obj_exception_make_new(exc_type, 1, 0, &arg); } // The following struct and function implement a simple printer that conservatively @@ -366,11 +406,10 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char // that buffer to store the string object and its data (at least 16 bytes for // the string data), reserving room at the start for the traceback and 1-tuple. 
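+ // Under the same illustrative 32-bit assumptions (plus a 16-byte
+ // mp_obj_str_t), EMG_BUF_STR_OFFSET works out to 24 + (8 + 4) = 36, so the
+ // test below asks for an emergency buffer of at least 36 + 16 + 16 = 68 bytes.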
if ((o_str == NULL || o_str_buf == NULL) - && mp_emergency_exception_buf_size >= EMG_TRACEBACK_ALLOC * sizeof(size_t) - + sizeof(mp_obj_tuple_t) + sizeof(mp_obj_t) + sizeof(mp_obj_str_t) + 16) { + && mp_emergency_exception_buf_size >= EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t) + 16) { used_emg_buf = true; o_str = (mp_obj_str_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) - + EMG_TRACEBACK_ALLOC * sizeof(size_t) + sizeof(mp_obj_tuple_t) + sizeof(mp_obj_t)); + + EMG_BUF_STR_OFFSET); o_str_buf = (byte*)&o_str[1]; o_str_alloc = (uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - o_str_buf; @@ -409,7 +448,7 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char // return true if the given object is an exception type bool mp_obj_is_exception_type(mp_obj_t self_in) { - if (MP_OBJ_IS_TYPE(self_in, &mp_type_type)) { + if (mp_obj_is_type(self_in, &mp_type_type)) { // optimisation when self_in is a builtin exception mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in); if (self->make_new == mp_obj_exception_make_new) { @@ -464,11 +503,12 @@ void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qs self->traceback_data = m_new_maybe(size_t, TRACEBACK_ENTRY_LEN); if (self->traceback_data == NULL) { #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF - if (mp_emergency_exception_buf_size >= EMG_TRACEBACK_ALLOC * sizeof(size_t)) { + if (mp_emergency_exception_buf_size >= EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE) { // There is room in the emergency buffer for traceback data - size_t *tb = (size_t*)MP_STATE_VM(mp_emergency_exception_buf); + size_t *tb = (size_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + + EMG_BUF_TRACEBACK_OFFSET); self->traceback_data = tb; - self->traceback_alloc = EMG_TRACEBACK_ALLOC; + self->traceback_alloc = EMG_BUF_TRACEBACK_SIZE / sizeof(size_t); } else { // Can't allocate and no room in emergency buffer return; diff --git a/python/src/py/objfilter.c b/python/src/py/objfilter.c index cb965d8c3..41b2a3bc5 100644 --- a/python/src/py/objfilter.c +++ b/python/src/py/objfilter.c @@ -44,7 +44,7 @@ STATIC mp_obj_t filter_make_new(const mp_obj_type_t *type, size_t n_args, size_t } STATIC mp_obj_t filter_iternext(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_filter)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_filter)); mp_obj_filter_t *self = MP_OBJ_TO_PTR(self_in); mp_obj_t next; while ((next = mp_iternext(self->iter)) != MP_OBJ_STOP_ITERATION) { diff --git a/python/src/py/objfloat.c b/python/src/py/objfloat.c index e4d5a6570..3da549bb2 100644 --- a/python/src/py/objfloat.c +++ b/python/src/py/objfloat.c @@ -88,7 +88,7 @@ typedef uint32_t mp_float_uint_t; if (adj_exp <= MP_FLOAT_FRAC_BITS) { // number may have a fraction; xor the integer part with the fractional part val = (frc >> (MP_FLOAT_FRAC_BITS - adj_exp)) - ^ (frc & ((1 << (MP_FLOAT_FRAC_BITS - adj_exp)) - 1)); + ^ (frc & (((mp_float_uint_t)1 << (MP_FLOAT_FRAC_BITS - adj_exp)) - 1)); } else if ((unsigned int)adj_exp < BITS_PER_BYTE * sizeof(mp_int_t) - 1) { // the number is a (big) whole integer and will fit in val's signed-width val = (mp_int_t)frc << (adj_exp - MP_FLOAT_FRAC_BITS); @@ -99,7 +99,7 @@ typedef uint32_t mp_float_uint_t; } if (u.p.sgn) { - val = -val; + val = -(mp_uint_t)val; } return val; @@ -161,8 +161,7 @@ STATIC mp_obj_t float_unary_op(mp_unary_op_t op, mp_obj_t o_in) { case MP_UNARY_OP_POSITIVE: return o_in; case MP_UNARY_OP_NEGATIVE: return mp_obj_new_float(-val); case MP_UNARY_OP_ABS: { - 
// TODO check for NaN etc - if (val < 0) { + if (signbit(val)) { return mp_obj_new_float(-val); } else { return o_in; @@ -175,7 +174,7 @@ STATIC mp_obj_t float_unary_op(mp_unary_op_t op, mp_obj_t o_in) { STATIC mp_obj_t float_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) { mp_float_t lhs_val = mp_obj_float_get(lhs_in); #if MICROPY_PY_BUILTINS_COMPLEX - if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_complex)) { + if (mp_obj_is_type(rhs_in, &mp_type_complex)) { return mp_obj_complex_binary_op(op, lhs_val, 0, rhs_in); } else #endif @@ -262,7 +261,7 @@ mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE: if (rhs_val == 0) { zero_division_error: - mp_raise_msg(&mp_type_ZeroDivisionError, "division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero"); } // Python specs require that x == (x//y)*y + (x%y) so we must // call divmod to compute the correct floor division, which diff --git a/python/src/py/objfun.c b/python/src/py/objfun.c index e6d33d287..d96c79ede 100644 --- a/python/src/py/objfun.c +++ b/python/src/py/objfun.c @@ -50,7 +50,7 @@ STATIC mp_obj_t fun_builtin_0_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { (void)args; - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin_0)); + assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_0)); mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in); mp_arg_check_num(n_args, n_kw, 0, 0, false); return self->fun._0(); @@ -64,7 +64,7 @@ const mp_obj_type_t mp_type_fun_builtin_0 = { }; STATIC mp_obj_t fun_builtin_1_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin_1)); + assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_1)); mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in); mp_arg_check_num(n_args, n_kw, 1, 1, false); return self->fun._1(args[0]); @@ -78,7 +78,7 @@ const mp_obj_type_t mp_type_fun_builtin_1 = { }; STATIC mp_obj_t fun_builtin_2_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin_2)); + assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_2)); mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in); mp_arg_check_num(n_args, n_kw, 2, 2, false); return self->fun._2(args[0], args[1]); @@ -92,7 +92,7 @@ const mp_obj_type_t mp_type_fun_builtin_2 = { }; STATIC mp_obj_t fun_builtin_3_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin_3)); + assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_3)); mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in); mp_arg_check_num(n_args, n_kw, 3, 3, false); return self->fun._3(args[0], args[1], args[2]); @@ -106,13 +106,13 @@ const mp_obj_type_t mp_type_fun_builtin_3 = { }; STATIC mp_obj_t fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_fun_builtin_var)); + assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_var)); mp_obj_fun_builtin_var_t *self = MP_OBJ_TO_PTR(self_in); // check number of arguments - mp_arg_check_num(n_args, n_kw, self->n_args_min, self->n_args_max, self->is_kw); + mp_arg_check_num_sig(n_args, n_kw, self->sig); - if (self->is_kw) { + if (self->sig & 1) { // function allows keywords // we create a map directly from the given args array @@ -154,7 +154,7 @@ STATIC const mp_obj_type_t mp_type_fun_native; qstr mp_obj_fun_get_name(mp_const_obj_t 
fun_in) { const mp_obj_fun_bc_t *fun = MP_OBJ_TO_PTR(fun_in); #if MICROPY_EMIT_NATIVE - if (fun->base.type == &mp_type_fun_native) { + if (fun->base.type == &mp_type_fun_native || fun->base.type == &mp_type_native_gen_wrap) { // TODO native functions don't have name stored return MP_QSTR_; } @@ -195,19 +195,15 @@ STATIC void dump_args(const mp_obj_t *a, size_t sz) { // than this will try to use the heap, with fallback to stack allocation. #define VM_MAX_STATE_ON_STACK (11 * sizeof(mp_uint_t)) -// Set this to 1 to enable a simple stack overflow check. -#define VM_DETECT_STACK_OVERFLOW (0) - #define DECODE_CODESTATE_SIZE(bytecode, n_state_out_var, state_size_out_var) \ { \ /* bytecode prelude: state size and exception stack size */ \ n_state_out_var = mp_decode_uint_value(bytecode); \ size_t n_exc_stack = mp_decode_uint_value(mp_decode_uint_skip(bytecode)); \ \ - n_state += VM_DETECT_STACK_OVERFLOW; \ - \ /* state size in bytes */ \ - state_size_out_var = n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t); \ + state_size_out_var = n_state_out_var * sizeof(mp_obj_t) \ + + n_exc_stack * sizeof(mp_exc_stack_t); \ } #define INIT_CODESTATE(code_state, _fun_bc, n_args, n_kw, args) \ @@ -256,8 +252,8 @@ STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const dump_args(args, n_args); DEBUG_printf("Input kw args: "); dump_args(args + n_args, n_kw * 2); + mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in); - DEBUG_printf("Func n_def_args: %d\n", self->n_def_args); size_t n_state, state_size; DECODE_CODESTATE_SIZE(self->bytecode, n_state, state_size); @@ -269,9 +265,17 @@ STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const #else if (state_size > VM_MAX_STATE_ON_STACK) { code_state = m_new_obj_var_maybe(mp_code_state_t, byte, state_size); + #if MICROPY_DEBUG_VM_STACK_OVERFLOW + if (code_state != NULL) { + memset(code_state->state, 0, state_size); + } + #endif } if (code_state == NULL) { code_state = alloca(sizeof(mp_code_state_t) + state_size); + #if MICROPY_DEBUG_VM_STACK_OVERFLOW + memset(code_state->state, 0, state_size); + #endif state_size = 0; // indicate that we allocated using alloca } #endif @@ -283,31 +287,34 @@ STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL); mp_globals_set(code_state->old_globals); -#if VM_DETECT_STACK_OVERFLOW + #if MICROPY_DEBUG_VM_STACK_OVERFLOW if (vm_return_kind == MP_VM_RETURN_NORMAL) { if (code_state->sp < code_state->state) { - printf("VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state); + mp_printf(MICROPY_DEBUG_PRINTER, "VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state); assert(0); } } - // We can't check the case when an exception is returned in state[n_state - 1] + const byte *bytecode_ptr = mp_decode_uint_skip(mp_decode_uint_skip(self->bytecode)); + size_t n_pos_args = bytecode_ptr[1]; + size_t n_kwonly_args = bytecode_ptr[2]; + // We can't check the case when an exception is returned in state[0] // and there are no arguments, because in this case our detection slot may have // been overwritten by the returned exception (which is allowed). - if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && self->n_pos_args + self->n_kwonly_args == 0)) { + if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && n_pos_args + n_kwonly_args == 0)) { // Just check to see that we have at least 1 null object left in the state. 
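+ // All state slots start out as MP_OBJ_NULL (see the memsets above); the
+ // value stack grows up from state[0] towards the locals stored at the top
+ // of the array, so if no slot below the locals is still MP_OBJ_NULL the
+ // stack has most likely run into them.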
bool overflow = true; - for (size_t i = 0; i < n_state - self->n_pos_args - self->n_kwonly_args; i++) { + for (size_t i = 0; i < n_state - n_pos_args - n_kwonly_args; ++i) { if (code_state->state[i] == MP_OBJ_NULL) { overflow = false; break; } } if (overflow) { - printf("VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state); + mp_printf(MICROPY_DEBUG_PRINTER, "VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state); assert(0); } } -#endif + #endif mp_obj_t result; if (vm_return_kind == MP_VM_RETURN_NORMAL) { @@ -316,8 +323,8 @@ STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const } else { // must be an exception because normal functions can't yield assert(vm_return_kind == MP_VM_RETURN_EXCEPTION); - // return value is in fastn[0]==state[n_state - 1] - result = code_state->state[n_state - 1]; + // returned exception is in state[0] + result = code_state->state[0]; } #if MICROPY_ENABLE_PYSTACK @@ -337,7 +344,7 @@ STATIC mp_obj_t fun_bc_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const } #if MICROPY_PY_FUNCTION_ATTRS -STATIC void fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { +void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { if (dest[0] != MP_OBJ_NULL) { // not load attribute return; @@ -357,7 +364,7 @@ const mp_obj_type_t mp_type_fun_bc = { .call = fun_bc_call, .unary_op = mp_generic_unary_op, #if MICROPY_PY_FUNCTION_ATTRS - .attr = fun_bc_attr, + .attr = mp_obj_fun_bc_attr, #endif }; @@ -366,7 +373,7 @@ mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byt size_t n_extra_args = 0; mp_obj_tuple_t *def_args = MP_OBJ_TO_PTR(def_args_in); if (def_args_in != MP_OBJ_NULL) { - assert(MP_OBJ_IS_TYPE(def_args_in, &mp_type_tuple)); + assert(mp_obj_is_type(def_args_in, &mp_type_tuple)); n_def_args = def_args->len; n_extra_args = def_args->len; } @@ -414,72 +421,6 @@ mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const #endif // MICROPY_EMIT_NATIVE -/******************************************************************************/ -/* viper functions */ - -#if MICROPY_EMIT_NATIVE - -typedef struct _mp_obj_fun_viper_t { - mp_obj_base_t base; - size_t n_args; - void *fun_data; // GC must be able to trace this pointer - mp_uint_t type_sig; -} mp_obj_fun_viper_t; - -typedef mp_uint_t (*viper_fun_0_t)(void); -typedef mp_uint_t (*viper_fun_1_t)(mp_uint_t); -typedef mp_uint_t (*viper_fun_2_t)(mp_uint_t, mp_uint_t); -typedef mp_uint_t (*viper_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t); -typedef mp_uint_t (*viper_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint_t); - -STATIC mp_obj_t fun_viper_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - mp_obj_fun_viper_t *self = self_in; - - mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false); - - void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data); - - mp_uint_t ret; - if (n_args == 0) { - ret = ((viper_fun_0_t)fun)(); - } else if (n_args == 1) { - ret = ((viper_fun_1_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4)); - } else if (n_args == 2) { - ret = ((viper_fun_2_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4), mp_convert_obj_to_native(args[1], self->type_sig >> 8)); - } else if (n_args == 3) { - ret = ((viper_fun_3_t)fun)(mp_convert_obj_to_native(args[0], self->type_sig >> 4), mp_convert_obj_to_native(args[1], self->type_sig >> 8), mp_convert_obj_to_native(args[2], self->type_sig >> 12)); - } else { - // compiler allows 
at most 4 arguments - assert(n_args == 4); - ret = ((viper_fun_4_t)fun)( - mp_convert_obj_to_native(args[0], self->type_sig >> 4), - mp_convert_obj_to_native(args[1], self->type_sig >> 8), - mp_convert_obj_to_native(args[2], self->type_sig >> 12), - mp_convert_obj_to_native(args[3], self->type_sig >> 16) - ); - } - - return mp_convert_native_to_obj(ret, self->type_sig); -} - -STATIC const mp_obj_type_t mp_type_fun_viper = { - { &mp_type_type }, - .name = MP_QSTR_function, - .call = fun_viper_call, - .unary_op = mp_generic_unary_op, -}; - -mp_obj_t mp_obj_new_fun_viper(size_t n_args, void *fun_data, mp_uint_t type_sig) { - mp_obj_fun_viper_t *o = m_new_obj(mp_obj_fun_viper_t); - o->base.type = &mp_type_fun_viper; - o->n_args = n_args; - o->fun_data = fun_data; - o->type_sig = type_sig; - return o; -} - -#endif // MICROPY_EMIT_NATIVE - /******************************************************************************/ /* inline assembler functions */ @@ -488,7 +429,7 @@ mp_obj_t mp_obj_new_fun_viper(size_t n_args, void *fun_data, mp_uint_t type_sig) typedef struct _mp_obj_fun_asm_t { mp_obj_base_t base; size_t n_args; - void *fun_data; // GC must be able to trace this pointer + const void *fun_data; // GC must be able to trace this pointer mp_uint_t type_sig; } mp_obj_fun_asm_t; @@ -501,7 +442,7 @@ typedef mp_uint_t (*inline_asm_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint // convert a MicroPython object to a sensible value for inline asm STATIC mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) { // TODO for byte_array, pass pointer to the array - if (MP_OBJ_IS_SMALL_INT(obj)) { + if (mp_obj_is_small_int(obj)) { return MP_OBJ_SMALL_INT_VALUE(obj); } else if (obj == mp_const_none) { return 0; @@ -509,21 +450,21 @@ STATIC mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) { return 0; } else if (obj == mp_const_true) { return 1; - } else if (MP_OBJ_IS_TYPE(obj, &mp_type_int)) { + } else if (mp_obj_is_type(obj, &mp_type_int)) { return mp_obj_int_get_truncated(obj); - } else if (MP_OBJ_IS_STR(obj)) { + } else if (mp_obj_is_str(obj)) { // pointer to the string (it's probably constant though!) 
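+ // e.g. an inline-assembler routine (such as one declared with
+ // @micropython.asm_thumb on Thumb targets) receives the address of the
+ // string's character data when passed a str argument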
size_t l; return (mp_uint_t)mp_obj_str_get_data(obj, &l); } else { mp_obj_type_t *type = mp_obj_get_type(obj); - if (0) { #if MICROPY_PY_BUILTINS_FLOAT - } else if (type == &mp_type_float) { + if (type == &mp_type_float) { // convert float to int (could also pass in float registers) return (mp_int_t)mp_obj_float_get(obj); + } else #endif - } else if (type == &mp_type_tuple || type == &mp_type_list) { + if (type == &mp_type_tuple || type == &mp_type_list) { // pointer to start of tuple (could pass length, but then could use len(x) for that) size_t len; mp_obj_t *items; @@ -547,7 +488,7 @@ STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false); - void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data); + const void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data); mp_uint_t ret; if (n_args == 0) { @@ -569,7 +510,7 @@ STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const ); } - return mp_convert_native_to_obj(ret, self->type_sig); + return mp_native_to_obj(ret, self->type_sig); } STATIC const mp_obj_type_t mp_type_fun_asm = { @@ -579,7 +520,7 @@ STATIC const mp_obj_type_t mp_type_fun_asm = { .unary_op = mp_generic_unary_op, }; -mp_obj_t mp_obj_new_fun_asm(size_t n_args, void *fun_data, mp_uint_t type_sig) { +mp_obj_t mp_obj_new_fun_asm(size_t n_args, const void *fun_data, mp_uint_t type_sig) { mp_obj_fun_asm_t *o = m_new_obj(mp_obj_fun_asm_t); o->base.type = &mp_type_fun_asm; o->n_args = n_args; diff --git a/python/src/py/objfun.h b/python/src/py/objfun.h index fbb351626..257b8a65a 100644 --- a/python/src/py/objfun.h +++ b/python/src/py/objfun.h @@ -41,4 +41,6 @@ typedef struct _mp_obj_fun_bc_t { mp_obj_t extra_args[]; } mp_obj_fun_bc_t; +void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest); + #endif // MICROPY_INCLUDED_PY_OBJFUN_H diff --git a/python/src/py/objgenerator.c b/python/src/py/objgenerator.c index 5fd13f831..29c7cb16d 100644 --- a/python/src/py/objgenerator.c +++ b/python/src/py/objgenerator.c @@ -3,7 +3,7 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2013-2019 Damien P. 
George * Copyright (c) 2014-2017 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy @@ -37,11 +37,6 @@ /******************************************************************************/ /* generator wrapper */ -typedef struct _mp_obj_gen_wrap_t { - mp_obj_base_t base; - mp_obj_t *fun; -} mp_obj_gen_wrap_t; - typedef struct _mp_obj_gen_instance_t { mp_obj_base_t base; mp_obj_dict_t *globals; @@ -49,9 +44,8 @@ typedef struct _mp_obj_gen_instance_t { } mp_obj_gen_instance_t; STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { - mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in); - mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t*)self->fun; - assert(self_fun->base.type == &mp_type_fun_bc); + // A generating function is just a bytecode function with type mp_type_gen_wrap + mp_obj_fun_bc_t *self_fun = MP_OBJ_TO_PTR(self_in); // bytecode prelude: get state size and exception stack size size_t n_state = mp_decode_uint_value(self_fun->bytecode); @@ -74,15 +68,58 @@ const mp_obj_type_t mp_type_gen_wrap = { .name = MP_QSTR_generator, .call = gen_wrap_call, .unary_op = mp_generic_unary_op, + #if MICROPY_PY_FUNCTION_ATTRS + .attr = mp_obj_fun_bc_attr, + #endif }; -mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun) { - mp_obj_gen_wrap_t *o = m_new_obj(mp_obj_gen_wrap_t); - o->base.type = &mp_type_gen_wrap; - o->fun = MP_OBJ_TO_PTR(fun); +/******************************************************************************/ +// native generator wrapper + +#if MICROPY_EMIT_NATIVE + +STATIC mp_obj_t native_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) { + // The state for a native generating function is held in the same struct as a bytecode function + mp_obj_fun_bc_t *self_fun = MP_OBJ_TO_PTR(self_in); + + // Determine start of prelude, and extract n_state from it + uintptr_t prelude_offset = ((uintptr_t*)self_fun->bytecode)[0]; + size_t n_state = mp_decode_uint_value(self_fun->bytecode + prelude_offset); + size_t n_exc_stack = 0; + + // Allocate the generator object, with room for local stack and exception stack + mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte, + n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t)); + o->base.type = &mp_type_gen_instance; + + // Parse the input arguments and set up the code state + o->globals = self_fun->globals; + o->code_state.fun_bc = self_fun; + o->code_state.ip = (const byte*)prelude_offset; + mp_setup_code_state(&o->code_state, n_args, n_kw, args); + + // Indicate we are a native function, which doesn't use this variable + o->code_state.exc_sp = NULL; + + // Prepare the generator instance for execution + uintptr_t start_offset = ((uintptr_t*)self_fun->bytecode)[1]; + o->code_state.ip = MICROPY_MAKE_POINTER_CALLABLE((void*)(self_fun->bytecode + start_offset)); + return MP_OBJ_FROM_PTR(o); } +const mp_obj_type_t mp_type_native_gen_wrap = { + { &mp_type_type }, + .name = MP_QSTR_generator, + .call = native_gen_wrap_call, + .unary_op = mp_generic_unary_op, + #if MICROPY_PY_FUNCTION_ATTRS + .attr = mp_obj_fun_bc_attr, + #endif +}; + +#endif // MICROPY_EMIT_NATIVE + /******************************************************************************/ /* generator instance */ @@ -94,7 +131,7 @@ STATIC void gen_instance_print(const mp_print_t *print, mp_obj_t self_in, mp_pri mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) { MP_STACK_CHECK(); - 
mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_gen_instance)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_gen_instance)); mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in); if (self->code_state.ip == 0) { // Trying to resume already stopped generator @@ -117,10 +154,35 @@ mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_ *self->code_state.sp = send_value; } } - mp_obj_dict_t *old_globals = mp_globals_get(); + + // We set self->globals=NULL while executing, for a sentinel to ensure the generator + // cannot be reentered during execution + if (self->globals == NULL) { + mp_raise_ValueError("generator already executing"); + } + + // Set up the correct globals context for the generator and execute it + self->code_state.old_globals = mp_globals_get(); mp_globals_set(self->globals); - mp_vm_return_kind_t ret_kind = mp_execute_bytecode(&self->code_state, throw_value); - mp_globals_set(old_globals); + self->globals = NULL; + + mp_vm_return_kind_t ret_kind; + + #if MICROPY_EMIT_NATIVE + if (self->code_state.exc_sp == NULL) { + // A native generator, with entry point 2 words into the "bytecode" pointer + typedef uintptr_t (*mp_fun_native_gen_t)(void*, mp_obj_t); + mp_fun_native_gen_t fun = MICROPY_MAKE_POINTER_CALLABLE((const void*)(self->code_state.fun_bc->bytecode + 2 * sizeof(uintptr_t))); + ret_kind = fun((void*)&self->code_state, throw_value); + } else + #endif + { + // A bytecode generator + ret_kind = mp_execute_bytecode(&self->code_state, throw_value); + } + + self->globals = mp_globals_get(); + mp_globals_set(self->code_state.old_globals); switch (ret_kind) { case MP_VM_RETURN_NORMAL: @@ -128,8 +190,6 @@ mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_ // Explicitly mark generator as completed. If we don't do this, // subsequent next() may re-execute statements after last yield // again and again, leading to side effects. - // TODO: check how return with value behaves under such conditions - // in CPython. self->code_state.ip = 0; *ret_val = *self->code_state.sp; break; @@ -142,9 +202,12 @@ mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_ break; case MP_VM_RETURN_EXCEPTION: { - size_t n_state = mp_decode_uint_value(self->code_state.fun_bc->bytecode); self->code_state.ip = 0; - *ret_val = self->code_state.state[n_state - 1]; + *ret_val = self->code_state.state[0]; + // PEP479: if StopIteration is raised inside a generator it is replaced with RuntimeError + if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(*ret_val)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) { + *ret_val = mp_obj_new_exception_msg(&mp_type_RuntimeError, "generator raised StopIteration"); + } break; } } @@ -168,15 +231,6 @@ STATIC mp_obj_t gen_resume_and_raise(mp_obj_t self_in, mp_obj_t send_value, mp_o return ret; case MP_VM_RETURN_EXCEPTION: - // TODO: Optimization of returning MP_OBJ_STOP_ITERATION is really part - // of mp_iternext() protocol, but this function is called by other methods - // too, which may not handled MP_OBJ_STOP_ITERATION. 
- if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) { - mp_obj_t val = mp_obj_exception_get_value(ret); - if (val == mp_const_none) { - return MP_OBJ_STOP_ITERATION; - } - } nlr_raise(ret); } } @@ -193,12 +247,25 @@ STATIC mp_obj_t gen_instance_send(mp_obj_t self_in, mp_obj_t send_value) { return ret; } } - STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_send_obj, gen_instance_send); -STATIC mp_obj_t gen_instance_close(mp_obj_t self_in); STATIC mp_obj_t gen_instance_throw(size_t n_args, const mp_obj_t *args) { - mp_obj_t exc = (n_args == 2) ? args[1] : args[2]; + // The signature of this function is: throw(type[, value[, traceback]]) + // CPython will pass all given arguments through the call chain and process them + // at the point they are used (native generators will handle them differently to + // user-defined generators with a throw() method). To save passing multiple + // values, MicroPython instead does partial processing here to reduce it down to + // one argument and passes that through: + // - if only args[1] is given, or args[2] is given but is None, args[1] is + // passed through (in the standard case it is an exception class or instance) + // - if args[2] is given and not None it is passed through (in the standard + // case it would be an exception instance and args[1] its corresponding class) + // - args[3] is always ignored + + mp_obj_t exc = args[1]; + if (n_args > 2 && args[2] != mp_const_none) { + exc = args[2]; + } mp_obj_t ret = gen_resume_and_raise(args[0], mp_const_none, exc); if (ret == MP_OBJ_STOP_ITERATION) { @@ -207,7 +274,6 @@ STATIC mp_obj_t gen_instance_throw(size_t n_args, const mp_obj_t *args) { return ret; } } - STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gen_instance_throw_obj, 2, 4, gen_instance_throw); STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) { @@ -216,11 +282,10 @@ STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) { case MP_VM_RETURN_YIELD: mp_raise_msg(&mp_type_RuntimeError, "generator ignored GeneratorExit"); - // Swallow StopIteration & GeneratorExit (== successful close), and re-raise any other + // Swallow GeneratorExit (== successful close), and re-raise any other case MP_VM_RETURN_EXCEPTION: // ret should always be an instance of an exception class - if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit)) || - mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) { + if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { return mp_const_none; } nlr_raise(ret); @@ -230,7 +295,6 @@ STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) { return mp_const_none; } } - STATIC MP_DEFINE_CONST_FUN_OBJ_1(gen_instance_close_obj, gen_instance_close); STATIC mp_obj_t gen_instance_pend_throw(mp_obj_t self_in, mp_obj_t exc_in) { diff --git a/python/src/py/objint.c b/python/src/py/objint.c index 270e16969..6473767e4 100644 --- a/python/src/py/objint.c +++ b/python/src/py/objint.c @@ -49,10 +49,10 @@ STATIC mp_obj_t mp_obj_int_make_new(const mp_obj_type_t *type_in, size_t n_args, return MP_OBJ_NEW_SMALL_INT(0); case 1: - if (MP_OBJ_IS_INT(args[0])) { + if (mp_obj_is_int(args[0])) { // already an int (small or long), just return it return args[0]; - } else if (MP_OBJ_IS_STR_OR_BYTES(args[0])) { + } else if (mp_obj_is_str_or_bytes(args[0])) { // a string, parse it size_t l; const char *s = mp_obj_str_get_data(args[0], &l); @@ -62,14 +62,12 
@@ STATIC mp_obj_t mp_obj_int_make_new(const mp_obj_type_t *type_in, size_t n_args, return mp_obj_new_int_from_float(mp_obj_float_get(args[0])); #endif } else { - // try to convert to small int (eg from bool) - return MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(args[0])); + return mp_unary_op(MP_UNARY_OP_INT, args[0]); } case 2: default: { // should be a string, parse it - // TODO proper error checking of argument types size_t l; const char *s = mp_obj_str_get_data(args[0], &l); return mp_parse_num_integer(s, l, mp_obj_get_int(args[1]), NULL); @@ -226,11 +224,11 @@ char *mp_obj_int_formatted(char **buf, size_t *buf_size, size_t *fmt_size, mp_co // Only have small ints; get the integer value to format. num = MP_OBJ_SMALL_INT_VALUE(self_in); #else - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { // A small int; get the integer value to format. num = MP_OBJ_SMALL_INT_VALUE(self_in); } else { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); // Not a small int. #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG const mp_obj_int_t *self = self_in; @@ -377,7 +375,7 @@ mp_obj_t mp_obj_int_binary_op_extra_cases(mp_binary_op_t op, mp_obj_t lhs_in, mp // true acts as 0 return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(1)); } else if (op == MP_BINARY_OP_MULTIPLY) { - if (MP_OBJ_IS_STR_OR_BYTES(rhs_in) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(rhs_in, &mp_type_list)) { + if (mp_obj_is_str_or_bytes(rhs_in) || mp_obj_is_type(rhs_in, &mp_type_tuple) || mp_obj_is_type(rhs_in, &mp_type_list)) { // multiply is commutative for these types, so delegate to them return mp_binary_op(op, rhs_in, lhs_in); } @@ -434,7 +432,7 @@ STATIC mp_obj_t int_to_bytes(size_t n_args, const mp_obj_t *args) { memset(data, 0, len); #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE - if (!MP_OBJ_IS_SMALL_INT(args[0])) { + if (!mp_obj_is_small_int(args[0])) { mp_obj_int_to_bytes_impl(args[0], big_endian, len, data); } else #endif diff --git a/python/src/py/objint_longlong.c b/python/src/py/objint_longlong.c index cb8d1672d..37e2d6b50 100644 --- a/python/src/py/objint_longlong.c +++ b/python/src/py/objint_longlong.c @@ -58,7 +58,7 @@ mp_obj_t mp_obj_int_from_bytes_impl(bool big_endian, size_t len, const byte *buf } void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byte *buf) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); mp_obj_int_t *self = self_in; long long val = self->val; if (big_endian) { @@ -77,7 +77,7 @@ void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byt int mp_obj_int_sign(mp_obj_t self_in) { mp_longint_impl_t val; - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { val = MP_OBJ_SMALL_INT_VALUE(self_in); } else { mp_obj_int_t *self = self_in; @@ -122,16 +122,16 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i long long lhs_val; long long rhs_val; - if (MP_OBJ_IS_SMALL_INT(lhs_in)) { + if (mp_obj_is_small_int(lhs_in)) { lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs_in); } else { - assert(MP_OBJ_IS_TYPE(lhs_in, &mp_type_int)); + assert(mp_obj_is_type(lhs_in, &mp_type_int)); lhs_val = ((mp_obj_int_t*)lhs_in)->val; } - if (MP_OBJ_IS_SMALL_INT(rhs_in)) { + if (mp_obj_is_small_int(rhs_in)) { rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs_in); - } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_int)) { + } else if (mp_obj_is_type(rhs_in, &mp_type_int)) { rhs_val = ((mp_obj_int_t*)rhs_in)->val; 
} else { // delegate to generic function to check for extra cases @@ -217,7 +217,7 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i } zero_division: - mp_raise_msg(&mp_type_ZeroDivisionError, "division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero"); } mp_obj_t mp_obj_new_int(mp_int_t value) { @@ -266,7 +266,7 @@ mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, uns } mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { return MP_OBJ_SMALL_INT_VALUE(self_in); } else { const mp_obj_int_t *self = self_in; @@ -281,7 +281,7 @@ mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) { #if MICROPY_PY_BUILTINS_FLOAT mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); mp_obj_int_t *self = self_in; return self->val; } diff --git a/python/src/py/objint_mpz.c b/python/src/py/objint_mpz.c index 0f05c84f4..121fd0342 100644 --- a/python/src/py/objint_mpz.c +++ b/python/src/py/objint_mpz.c @@ -90,7 +90,7 @@ mp_obj_int_t *mp_obj_int_new_mpz(void) { // This particular routine should only be called for the mpz representation of the int. char *mp_obj_int_formatted_impl(char **buf, size_t *buf_size, size_t *fmt_size, mp_const_obj_t self_in, int base, const char *prefix, char base_char, char comma) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); size_t needed_size = mp_int_format_size(mpz_max_num_bits(&self->mpz), base, prefix, comma); @@ -112,14 +112,14 @@ mp_obj_t mp_obj_int_from_bytes_impl(bool big_endian, size_t len, const byte *buf } void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byte *buf) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); memset(buf, 0, len); mpz_as_bytes(&self->mpz, big_endian, len, buf); } int mp_obj_int_sign(mp_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { mp_int_t val = MP_OBJ_SMALL_INT_VALUE(self_in); if (val < 0) { return -1; @@ -167,25 +167,25 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i mpz_dig_t z_int_dig[MPZ_NUM_DIG_FOR_INT]; // lhs could be a small int (eg small-int + mpz) - if (MP_OBJ_IS_SMALL_INT(lhs_in)) { + if (mp_obj_is_small_int(lhs_in)) { mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(lhs_in)); zlhs = &z_int; } else { - assert(MP_OBJ_IS_TYPE(lhs_in, &mp_type_int)); + assert(mp_obj_is_type(lhs_in, &mp_type_int)); zlhs = &((mp_obj_int_t*)MP_OBJ_TO_PTR(lhs_in))->mpz; } // if rhs is small int, then lhs was not (otherwise mp_binary_op handles it) - if (MP_OBJ_IS_SMALL_INT(rhs_in)) { + if (mp_obj_is_small_int(rhs_in)) { mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(rhs_in)); zrhs = &z_int; - } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_int)) { + } else if (mp_obj_is_type(rhs_in, &mp_type_int)) { zrhs = &((mp_obj_int_t*)MP_OBJ_TO_PTR(rhs_in))->mpz; #if MICROPY_PY_BUILTINS_FLOAT } else if (mp_obj_is_float(rhs_in)) { return mp_obj_float_binary_op(op, mpz_as_float(zlhs), rhs_in); #if MICROPY_PY_BUILTINS_COMPLEX - } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_complex)) { + } else if (mp_obj_is_type(rhs_in, &mp_type_complex)) { 
return mp_obj_complex_binary_op(op, mpz_as_float(zlhs), 0, rhs_in); #endif #endif @@ -194,18 +194,18 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in); } - if (0) { #if MICROPY_PY_BUILTINS_FLOAT - } else if (op == MP_BINARY_OP_TRUE_DIVIDE || op == MP_BINARY_OP_INPLACE_TRUE_DIVIDE) { + if (op == MP_BINARY_OP_TRUE_DIVIDE || op == MP_BINARY_OP_INPLACE_TRUE_DIVIDE) { if (mpz_is_zero(zrhs)) { goto zero_division_error; } mp_float_t flhs = mpz_as_float(zlhs); mp_float_t frhs = mpz_as_float(zrhs); return mp_obj_new_float(flhs / frhs); + } else #endif - } else if (op >= MP_BINARY_OP_INPLACE_OR && op < MP_BINARY_OP_CONTAINS) { + if (op >= MP_BINARY_OP_INPLACE_OR && op < MP_BINARY_OP_CONTAINS) { mp_obj_int_t *res = mp_obj_int_new_mpz(); switch (op) { @@ -225,7 +225,7 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE: { if (mpz_is_zero(zrhs)) { zero_division_error: - mp_raise_msg(&mp_type_ZeroDivisionError, "division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero"); } mpz_t rem; mpz_init_zero(&rem); mpz_divmod_inpl(&res->mpz, &rem, zlhs, zrhs); @@ -320,7 +320,7 @@ mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_i #if MICROPY_PY_BUILTINS_POW3 STATIC mpz_t *mp_mpz_for_int(mp_obj_t arg, mpz_t *temp) { - if (MP_OBJ_IS_SMALL_INT(arg)) { + if (mp_obj_is_small_int(arg)) { mpz_init_from_int(temp, MP_OBJ_SMALL_INT_VALUE(arg)); return temp; } else { @@ -330,7 +330,7 @@ STATIC mpz_t *mp_mpz_for_int(mp_obj_t arg, mpz_t *temp) { } mp_obj_t mp_obj_int_pow3(mp_obj_t base, mp_obj_t exponent, mp_obj_t modulus) { - if (!MP_OBJ_IS_INT(base) || !MP_OBJ_IS_INT(exponent) || !MP_OBJ_IS_INT(modulus)) { + if (!mp_obj_is_int(base) || !mp_obj_is_int(exponent) || !mp_obj_is_int(modulus)) { mp_raise_TypeError("pow() with 3 arguments requires integers"); } else { mp_obj_t result = mp_obj_new_int_from_ull(0); // Use the _from_ull version as this forces an mpz int @@ -387,7 +387,7 @@ mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, uns } mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { return MP_OBJ_SMALL_INT_VALUE(self_in); } else { const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); @@ -397,7 +397,7 @@ mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) { } mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) { - if (MP_OBJ_IS_SMALL_INT(self_in)) { + if (mp_obj_is_small_int(self_in)) { return MP_OBJ_SMALL_INT_VALUE(self_in); } else { const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); @@ -413,7 +413,7 @@ mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) { #if MICROPY_PY_BUILTINS_FLOAT mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_int)); + assert(mp_obj_is_type(self_in, &mp_type_int)); mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in); return mpz_as_float(&self->mpz); } diff --git a/python/src/py/objlist.c b/python/src/py/objlist.c index 1a18f937d..ec9d57fc7 100644 --- a/python/src/py/objlist.c +++ b/python/src/py/objlist.c @@ -104,7 +104,7 @@ STATIC mp_obj_t list_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { mp_obj_list_t *o = MP_OBJ_TO_PTR(lhs); switch (op) { case MP_BINARY_OP_ADD: { - if (!MP_OBJ_IS_TYPE(rhs, &mp_type_list)) { + if (!mp_obj_is_type(rhs, &mp_type_list)) { return MP_OBJ_NULL; // op not supported } mp_obj_list_t *p 
= MP_OBJ_TO_PTR(rhs); @@ -133,7 +133,7 @@ STATIC mp_obj_t list_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { case MP_BINARY_OP_LESS_EQUAL: case MP_BINARY_OP_MORE: case MP_BINARY_OP_MORE_EQUAL: { - if (!MP_OBJ_IS_TYPE(rhs, &mp_type_list)) { + if (!mp_obj_is_type(rhs, &mp_type_list)) { if (op == MP_BINARY_OP_EQUAL) { return mp_const_false; } @@ -154,7 +154,7 @@ STATIC mp_obj_t list_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { if (value == MP_OBJ_NULL) { // delete #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); mp_bound_slice_t slice; if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) { @@ -178,7 +178,7 @@ STATIC mp_obj_t list_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { // load mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_bound_slice_t slice; if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) { return mp_seq_extract_slice(self->len, self->items, &slice); @@ -192,7 +192,7 @@ STATIC mp_obj_t list_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { return self->items[index_val]; } else { #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); size_t value_len; mp_obj_t *value_items; mp_obj_get_array(value, &value_len, &value_items); @@ -232,7 +232,7 @@ STATIC mp_obj_t list_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) { } mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); if (self->len >= self->alloc) { self->items = m_renew(mp_obj_t, self->items, self->alloc, self->alloc * 2); @@ -244,8 +244,8 @@ mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg) { } STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); - if (MP_OBJ_IS_TYPE(arg_in, &mp_type_list)) { + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); + if (mp_obj_is_type(arg_in, &mp_type_list)) { mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); mp_obj_list_t *arg = MP_OBJ_TO_PTR(arg_in); @@ -265,7 +265,7 @@ STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in) { } STATIC mp_obj_t list_pop(size_t n_args, const mp_obj_t *args) { - mp_check_self(MP_OBJ_IS_TYPE(args[0], &mp_type_list)); + mp_check_self(mp_obj_is_type(args[0], &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(args[0]); if (self->len == 0) { mp_raise_msg(&mp_type_IndexError, "pop from empty list"); @@ -325,7 +325,7 @@ mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t*)&args); - mp_check_self(MP_OBJ_IS_TYPE(pos_args[0], &mp_type_list)); + mp_check_self(mp_obj_is_type(pos_args[0], &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(pos_args[0]); if (self->len > 1) { @@ -338,7 +338,7 @@ mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_ } STATIC mp_obj_t list_clear(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = 
MP_OBJ_TO_PTR(self_in); self->len = 0; self->items = m_renew(mp_obj_t, self->items, self->alloc, LIST_MIN_ALLOC); @@ -348,25 +348,25 @@ STATIC mp_obj_t list_clear(mp_obj_t self_in) { } STATIC mp_obj_t list_copy(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); return mp_obj_new_list(self->len, self->items); } STATIC mp_obj_t list_count(mp_obj_t self_in, mp_obj_t value) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); return mp_seq_count_obj(self->items, self->len, value); } STATIC mp_obj_t list_index(size_t n_args, const mp_obj_t *args) { - mp_check_self(MP_OBJ_IS_TYPE(args[0], &mp_type_list)); + mp_check_self(mp_obj_is_type(args[0], &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(args[0]); return mp_seq_index_obj(self->items, self->len, n_args, args); } STATIC mp_obj_t list_insert(mp_obj_t self_in, mp_obj_t idx, mp_obj_t obj) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); // insert has its own strange index logic mp_int_t index = MP_OBJ_SMALL_INT_VALUE(idx); @@ -391,7 +391,7 @@ STATIC mp_obj_t list_insert(mp_obj_t self_in, mp_obj_t idx, mp_obj_t obj) { } mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_t args[] = {self_in, value}; args[1] = list_index(2, args); list_pop(2, args); @@ -400,7 +400,7 @@ mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value) { } STATIC mp_obj_t list_reverse(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_list)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_list)); mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in); mp_int_t len = self->len; diff --git a/python/src/py/objmap.c b/python/src/py/objmap.c index 908c61507..78c52c892 100644 --- a/python/src/py/objmap.c +++ b/python/src/py/objmap.c @@ -49,7 +49,7 @@ STATIC mp_obj_t map_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_ } STATIC mp_obj_t map_iternext(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_map)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_map)); mp_obj_map_t *self = MP_OBJ_TO_PTR(self_in); mp_obj_t *nextses = m_new(mp_obj_t, self->n_iters); diff --git a/python/src/py/objmodule.c b/python/src/py/objmodule.c index c4aba3a7b..4a07913c5 100644 --- a/python/src/py/objmodule.c +++ b/python/src/py/objmodule.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George + * Copyright (c) 2014-2015 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -31,6 +32,8 @@ #include "py/runtime.h" #include "py/builtin.h" +#include "genhdr/moduledefs.h" + STATIC void module_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) { (void)kind; mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in); @@ -61,6 +64,13 @@ STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP); if (elem != NULL) { dest[0] = elem->value; + #if MICROPY_MODULE_GETATTR + } else if (attr != MP_QSTR___getattr__) { + elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___getattr__), MP_MAP_LOOKUP); + if (elem != NULL) { + dest[0] = mp_call_function_1(elem->value, MP_OBJ_NEW_QSTR(attr)); + } + #endif } } else { // delete/store attribute @@ -84,7 +94,6 @@ STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { mp_obj_dict_delete(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr)); } else { // store attribute - // TODO CPython allows STORE_ATTR to a module, but is this the correct implementation? mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), dest[1]); } dest[0] = MP_OBJ_NULL; // indicate success @@ -122,12 +131,6 @@ mp_obj_t mp_obj_new_module(qstr module_name) { return MP_OBJ_FROM_PTR(o); } -mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_module)); - mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in); - return self->globals; -} - /******************************************************************************/ // Global module table and related functions @@ -136,9 +139,6 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { { MP_ROM_QSTR(MP_QSTR_builtins), MP_ROM_PTR(&mp_module_builtins) }, { MP_ROM_QSTR(MP_QSTR_micropython), MP_ROM_PTR(&mp_module_micropython) }, -#if MICROPY_PY_ARRAY - { MP_ROM_QSTR(MP_QSTR_array), MP_ROM_PTR(&mp_module_array) }, -#endif #if MICROPY_PY_IO { MP_ROM_QSTR(MP_QSTR_uio), MP_ROM_PTR(&mp_module_io) }, #endif @@ -193,6 +193,9 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_UHASHLIB { MP_ROM_QSTR(MP_QSTR_uhashlib), MP_ROM_PTR(&mp_module_uhashlib) }, #endif +#if MICROPY_PY_UCRYPTOLIB + { MP_ROM_QSTR(MP_QSTR_ucryptolib), MP_ROM_PTR(&mp_module_ucryptolib) }, +#endif #if MICROPY_PY_UBINASCII { MP_ROM_QSTR(MP_QSTR_ubinascii), MP_ROM_PTR(&mp_module_ubinascii) }, #endif @@ -208,8 +211,8 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { #if MICROPY_PY_LWIP { MP_ROM_QSTR(MP_QSTR_lwip), MP_ROM_PTR(&mp_module_lwip) }, #endif -#if MICROPY_PY_WEBSOCKET - { MP_ROM_QSTR(MP_QSTR_websocket), MP_ROM_PTR(&mp_module_websocket) }, +#if MICROPY_PY_UWEBSOCKET + { MP_ROM_QSTR(MP_QSTR_uwebsocket), MP_ROM_PTR(&mp_module_uwebsocket) }, #endif #if MICROPY_PY_WEBREPL { MP_ROM_QSTR(MP_QSTR__webrepl), MP_ROM_PTR(&mp_module_webrepl) }, @@ -223,6 +226,11 @@ STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = { // extra builtin modules as defined by a port MICROPY_PORT_BUILTIN_MODULES + + #ifdef MICROPY_REGISTERED_MODULES + // builtin modules declared with MP_REGISTER_MODULE() + MICROPY_REGISTERED_MODULES + #endif }; MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table); diff --git a/python/src/py/objnamedtuple.c b/python/src/py/objnamedtuple.c index e7de899cf..54d8493c3 100644 --- 
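Note on the module_attr hunk above: with MICROPY_MODULE_GETATTR enabled, a name missing from a module's globals now falls back to calling the module's own __getattr__ with the requested name, mirroring CPython's PEP 562. A minimal sketch, assuming the option is compiled in and a hypothetical module file mymod.py:

    # mymod.py
    def __getattr__(name):
        # Only reached when the normal globals lookup fails
        if name == "answer":
            return 42
        raise AttributeError(name)

    # elsewhere:
    import mymod
    print(mymod.answer)  # 42, produced by mymod.__getattr__

The guard `attr != MP_QSTR___getattr__` in the hunk keeps the fallback from recursing when __getattr__ itself is the name being looked up.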
a/python/src/py/objnamedtuple.c +++ b/python/src/py/objnamedtuple.c @@ -172,7 +172,7 @@ STATIC mp_obj_t new_namedtuple_type(mp_obj_t name_in, mp_obj_t fields_in) { size_t n_fields; mp_obj_t *fields; #if MICROPY_CPYTHON_COMPAT - if (MP_OBJ_IS_STR(fields_in)) { + if (mp_obj_is_str(fields_in)) { fields_in = mp_obj_str_split(1, &fields_in); } #endif diff --git a/python/src/py/objobject.c b/python/src/py/objobject.c index 265fcfbf2..45228347e 100644 --- a/python/src/py/objobject.c +++ b/python/src/py/objobject.c @@ -49,7 +49,7 @@ STATIC mp_obj_t object___init__(mp_obj_t self) { STATIC MP_DEFINE_CONST_FUN_OBJ_1(object___init___obj, object___init__); STATIC mp_obj_t object___new__(mp_obj_t cls) { - if (!MP_OBJ_IS_TYPE(cls, &mp_type_type) || !mp_obj_is_instance_type((mp_obj_type_t*)MP_OBJ_TO_PTR(cls))) { + if (!mp_obj_is_type(cls, &mp_type_type) || !mp_obj_is_instance_type((mp_obj_type_t*)MP_OBJ_TO_PTR(cls))) { mp_raise_TypeError("__new__ arg must be a user-type"); } // This executes only "__new__" part of instance creation. diff --git a/python/src/py/objproperty.c b/python/src/py/objproperty.c index b66d24a11..49cb9ca1b 100644 --- a/python/src/py/objproperty.c +++ b/python/src/py/objproperty.c @@ -99,7 +99,7 @@ const mp_obj_type_t mp_type_property = { }; const mp_obj_t *mp_obj_property_get(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_property)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_property)); mp_obj_property_t *self = MP_OBJ_TO_PTR(self_in); return self->proxy; } diff --git a/python/src/py/objrange.c b/python/src/py/objrange.c index 86aa0ccfe..6c3097abd 100644 --- a/python/src/py/objrange.c +++ b/python/src/py/objrange.c @@ -140,7 +140,7 @@ STATIC mp_obj_t range_unary_op(mp_unary_op_t op, mp_obj_t self_in) { #if MICROPY_PY_BUILTINS_RANGE_BINOP STATIC mp_obj_t range_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) { - if (!MP_OBJ_IS_TYPE(rhs_in, &mp_type_range) || op != MP_BINARY_OP_EQUAL) { + if (!mp_obj_is_type(rhs_in, &mp_type_range) || op != MP_BINARY_OP_EQUAL) { return MP_OBJ_NULL; // op not supported } mp_obj_range_t *lhs = MP_OBJ_TO_PTR(lhs_in); @@ -162,7 +162,7 @@ STATIC mp_obj_t range_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in); mp_int_t len = range_len(self); #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_bound_slice_t slice; mp_seq_get_fast_slice_indexes(len, index, &slice); mp_obj_range_t *o = m_new_obj(mp_obj_range_t); diff --git a/python/src/py/objreversed.c b/python/src/py/objreversed.c index e498b553d..4254668e7 100644 --- a/python/src/py/objreversed.c +++ b/python/src/py/objreversed.c @@ -56,7 +56,7 @@ STATIC mp_obj_t reversed_make_new(const mp_obj_type_t *type, size_t n_args, size } STATIC mp_obj_t reversed_iternext(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_reversed)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_reversed)); mp_obj_reversed_t *self = MP_OBJ_TO_PTR(self_in); // "raise" stop iteration if we are at the end (the start) of the sequence diff --git a/python/src/py/objset.c b/python/src/py/objset.c index 799ba9df0..6f5bcc032 100644 --- a/python/src/py/objset.c +++ b/python/src/py/objset.c @@ -45,18 +45,16 @@ typedef struct _mp_obj_set_it_t { size_t cur; } mp_obj_set_it_t; -STATIC mp_obj_t set_it_iternext(mp_obj_t self_in); - STATIC bool is_set_or_frozenset(mp_obj_t o) { - return MP_OBJ_IS_TYPE(o, &mp_type_set) + return mp_obj_is_type(o, &mp_type_set) 
#if MICROPY_PY_BUILTINS_FROZENSET - || MP_OBJ_IS_TYPE(o, &mp_type_frozenset) + || mp_obj_is_type(o, &mp_type_frozenset) #endif ; } // This macro is shorthand for mp_check_self to verify the argument is a set. -#define check_set(o) mp_check_self(MP_OBJ_IS_TYPE(o, &mp_type_set)) +#define check_set(o) mp_check_self(mp_obj_is_type(o, &mp_type_set)) // This macro is shorthand for mp_check_self to verify the argument is a // set or frozenset for methods that operate on both of these types. @@ -66,7 +64,7 @@ STATIC void set_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t (void)kind; mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in); #if MICROPY_PY_BUILTINS_FROZENSET - bool is_frozen = MP_OBJ_IS_TYPE(self_in, &mp_type_frozenset); + bool is_frozen = mp_obj_is_type(self_in, &mp_type_frozenset); #endif if (self->set.used == 0) { #if MICROPY_PY_BUILTINS_FROZENSET @@ -85,7 +83,7 @@ STATIC void set_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t #endif mp_print_str(print, "{"); for (size_t i = 0; i < self->set.alloc; i++) { - if (MP_SET_SLOT_IS_FILLED(&self->set, i)) { + if (mp_set_slot_is_filled(&self->set, i)) { if (!first) { mp_print_str(print, ", "); } @@ -135,7 +133,7 @@ STATIC mp_obj_t set_it_iternext(mp_obj_t self_in) { mp_set_t *set = &self->set->set; for (size_t i = self->cur; i < max; i++) { - if (MP_SET_SLOT_IS_FILLED(set, i)) { + if (mp_set_slot_is_filled(set, i)) { self->cur = i + 1; return set->table[i]; } @@ -154,7 +152,6 @@ STATIC mp_obj_t set_getiter(mp_obj_t set_in, mp_obj_iter_buf_t *iter_buf) { return MP_OBJ_FROM_PTR(o); } - /******************************************************************************/ /* set methods */ @@ -169,9 +166,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_add_obj, set_add); STATIC mp_obj_t set_clear(mp_obj_t self_in) { check_set(self_in); mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in); - mp_set_clear(&self->set); - return mp_const_none; } STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_clear_obj, set_clear); @@ -332,6 +327,7 @@ STATIC mp_obj_t set_issubset_internal(mp_obj_t self_in, mp_obj_t other_in, bool } return out; } + STATIC mp_obj_t set_issubset(mp_obj_t self_in, mp_obj_t other_in) { return set_issubset_internal(self_in, other_in, false); } @@ -434,14 +430,14 @@ STATIC mp_obj_t set_unary_op(mp_unary_op_t op, mp_obj_t self_in) { case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(self->set.used); #if MICROPY_PY_BUILTINS_FROZENSET case MP_UNARY_OP_HASH: - if (MP_OBJ_IS_TYPE(self_in, &mp_type_frozenset)) { + if (mp_obj_is_type(self_in, &mp_type_frozenset)) { // start hash with unique value mp_int_t hash = (mp_int_t)(uintptr_t)&mp_type_frozenset; size_t max = self->set.alloc; mp_set_t *set = &self->set; for (size_t i = 0; i < max; i++) { - if (MP_SET_SLOT_IS_FILLED(set, i)) { + if (mp_set_slot_is_filled(set, i)) { hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, set->table[i])); } } @@ -455,7 +451,7 @@ STATIC mp_obj_t set_unary_op(mp_unary_op_t op, mp_obj_t self_in) { STATIC mp_obj_t set_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { mp_obj_t args[] = {lhs, rhs}; #if MICROPY_PY_BUILTINS_FROZENSET - bool update = MP_OBJ_IS_TYPE(lhs, &mp_type_set); + bool update = mp_obj_is_type(lhs, &mp_type_set); #else bool update = true; #endif @@ -518,7 +514,6 @@ STATIC mp_obj_t set_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { /******************************************************************************/ /* set constructors & public C API */ - STATIC const mp_rom_map_elem_t set_locals_dict_table[] = { { 
MP_ROM_QSTR(MP_QSTR_add), MP_ROM_PTR(&set_add_obj) }, { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&set_clear_obj) }, @@ -539,7 +534,6 @@ STATIC const mp_rom_map_elem_t set_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&set_update_obj) }, { MP_ROM_QSTR(MP_QSTR___contains__), MP_ROM_PTR(&mp_op_contains_obj) }, }; - STATIC MP_DEFINE_CONST_DICT(set_locals_dict, set_locals_dict_table); const mp_obj_type_t mp_type_set = { @@ -590,7 +584,7 @@ mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items) { } void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_set)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_set)); mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in); mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND); } diff --git a/python/src/py/objslice.c b/python/src/py/objslice.c index de996d831..cfc819edc 100644 --- a/python/src/py/objslice.c +++ b/python/src/py/objslice.c @@ -34,8 +34,6 @@ #if MICROPY_PY_BUILTINS_SLICE -// TODO: This implements only variant of slice with 2 integer args only. -// CPython supports 3rd arg (step), plus args can be arbitrary Python objects. typedef struct _mp_obj_slice_t { mp_obj_base_t base; mp_obj_t start; @@ -91,7 +89,7 @@ mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) { } void mp_obj_slice_get(mp_obj_t self_in, mp_obj_t *start, mp_obj_t *stop, mp_obj_t *step) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_slice)); + assert(mp_obj_is_type(self_in, &mp_type_slice)); mp_obj_slice_t *self = MP_OBJ_TO_PTR(self_in); *start = self->start; *stop = self->stop; diff --git a/python/src/py/objstr.c b/python/src/py/objstr.c index da925234e..1047ea94c 100644 --- a/python/src/py/objstr.c +++ b/python/src/py/objstr.c @@ -4,7 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014-2018 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -34,7 +34,9 @@ #include "py/runtime.h" #include "py/stackctrl.h" +#if MICROPY_PY_BUILTINS_STR_OP_MODULO STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_t *args, mp_obj_t dict); +#endif STATIC mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf); STATIC NORETURN void bad_implicit_conversion(mp_obj_t self_in); @@ -116,7 +118,7 @@ STATIC void str_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t } #endif #if !MICROPY_PY_BUILTINS_STR_UNICODE - bool is_bytes = MP_OBJ_IS_TYPE(self_in, &mp_type_bytes); + bool is_bytes = mp_obj_is_type(self_in, &mp_type_bytes); #else bool is_bytes = true; #endif @@ -153,7 +155,7 @@ mp_obj_t mp_obj_str_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_ default: // 2 or 3 args // TODO: validate 2nd/3rd args - if (MP_OBJ_IS_TYPE(args[0], &mp_type_bytes)) { + if (mp_obj_is_type(args[0], &mp_type_bytes)) { GET_STR_DATA_LEN(args[0], str_data, str_len); GET_STR_HASH(args[0], str_hash); if (str_hash == 0) { @@ -203,7 +205,7 @@ STATIC mp_obj_t bytes_make_new(const mp_obj_type_t *type_in, size_t n_args, size return mp_const_empty_bytes; } - if (MP_OBJ_IS_STR(args[0])) { + if (mp_obj_is_str(args[0])) { if (n_args < 2 || n_args > 3) { goto wrong_args; } @@ -222,7 +224,7 @@ STATIC mp_obj_t bytes_make_new(const mp_obj_type_t *type_in, size_t n_args, size goto wrong_args; } - if (MP_OBJ_IS_SMALL_INT(args[0])) { + if (mp_obj_is_small_int(args[0])) { mp_int_t len = MP_OBJ_SMALL_INT_VALUE(args[0]); if (len < 0) { mp_raise_ValueError(NULL); @@ -297,20 +299,24 @@ const byte *find_subbytes(const byte *haystack, size_t hlen, const byte *needle, // Note: this function is used to check if an object is a str or bytes, which // works because both those types use it as their binary_op method. Revisit -// MP_OBJ_IS_STR_OR_BYTES if this fact changes. +// mp_obj_is_str_or_bytes if this fact changes. mp_obj_t mp_obj_str_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) { // check for modulo if (op == MP_BINARY_OP_MODULO) { + #if MICROPY_PY_BUILTINS_STR_OP_MODULO mp_obj_t *args = &rhs_in; size_t n_args = 1; mp_obj_t dict = MP_OBJ_NULL; - if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_tuple)) { + if (mp_obj_is_type(rhs_in, &mp_type_tuple)) { // TODO: Support tuple subclasses? 
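Note on the surrounding mp_obj_str_binary_op hunk: %-formatting is now gated behind MICROPY_PY_BUILTINS_STR_OP_MODULO (returning MP_OBJ_NULL, i.e. an unsupported-operand TypeError, when compiled out), and the operator dispatches on the shape of the right-hand side. A short sketch of the three shapes, runnable on a build with the option enabled:

    print("%s-%s" % ("a", "b"))   # tuple rhs: positional arguments
    print("%(k)s" % {"k": "v"})   # dict rhs: keyed lookup
    print("%d" % 7)               # any other rhs: treated as a single argument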
mp_obj_tuple_get(rhs_in, &n_args, &args); - } else if (MP_OBJ_IS_TYPE(rhs_in, &mp_type_dict)) { + } else if (mp_obj_is_type(rhs_in, &mp_type_dict)) { dict = rhs_in; } return str_modulo_format(lhs_in, n_args, args, dict); + #else + return MP_OBJ_NULL; + #endif } // from now on we need lhs type and data, so extract them @@ -419,7 +425,7 @@ STATIC mp_obj_t bytes_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { if (value == MP_OBJ_SENTINEL) { // load #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_bound_slice_t slice; if (!mp_seq_get_fast_slice_indexes(self_len, index, &slice)) { mp_raise_NotImplementedError("only slices with step=1 (aka None) are supported"); @@ -440,7 +446,7 @@ STATIC mp_obj_t bytes_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { } STATIC mp_obj_t str_join(mp_obj_t self_in, mp_obj_t arg) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(self_in)); + mp_check_self(mp_obj_is_str_or_bytes(self_in)); const mp_obj_type_t *self_type = mp_obj_get_type(self_in); // get separation string @@ -450,7 +456,7 @@ STATIC mp_obj_t str_join(mp_obj_t self_in, mp_obj_t arg) { size_t seq_len; mp_obj_t *seq_items; - if (!MP_OBJ_IS_TYPE(arg, &mp_type_list) && !MP_OBJ_IS_TYPE(arg, &mp_type_tuple)) { + if (!mp_obj_is_type(arg, &mp_type_list) && !mp_obj_is_type(arg, &mp_type_tuple)) { // arg is not a list nor a tuple, try to convert it to a list // TODO: Try to optimize? arg = mp_type_list.make_new(&mp_type_list, 1, 0, &arg); @@ -680,7 +686,7 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rsplit_obj, 1, 3, str_rsplit); STATIC mp_obj_t str_finder(size_t n_args, const mp_obj_t *args, int direction, bool is_index) { const mp_obj_type_t *self_type = mp_obj_get_type(args[0]); - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(args[0])); + mp_check_self(mp_obj_is_str_or_bytes(args[0])); // check argument type if (mp_obj_get_type(args[1]) != self_type) { @@ -778,7 +784,7 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_endswith_obj, 2, 3, str_endswith); enum { LSTRIP, RSTRIP, STRIP }; STATIC mp_obj_t str_uni_strip(int type, size_t n_args, const mp_obj_t *args) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(args[0])); + mp_check_self(mp_obj_is_str_or_bytes(args[0])); const mp_obj_type_t *self_type = mp_obj_get_type(args[0]); const byte *chars_to_del; @@ -904,7 +910,7 @@ STATIC bool istype(char ch) { } STATIC bool arg_looks_integer(mp_obj_t arg) { - return MP_OBJ_IS_TYPE(arg, &mp_type_bool) || MP_OBJ_IS_INT(arg); + return mp_obj_is_type(arg, &mp_type_bool) || mp_obj_is_int(arg); } STATIC bool arg_looks_numeric(mp_obj_t arg) { @@ -915,6 +921,7 @@ STATIC bool arg_looks_numeric(mp_obj_t arg) { ; } +#if MICROPY_PY_BUILTINS_STR_OP_MODULO STATIC mp_obj_t arg_as_int(mp_obj_t arg) { #if MICROPY_PY_BUILTINS_FLOAT if (mp_obj_is_float(arg)) { @@ -923,6 +930,7 @@ STATIC mp_obj_t arg_as_int(mp_obj_t arg) { #endif return arg; } +#endif #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE STATIC NORETURN void terse_str_format_value_error(void) { @@ -1327,7 +1335,7 @@ STATIC vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *ar terse_str_format_value_error(); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError, - "unknown format code '%c' for object of type 'float'", + "unknown format code '%c' for object of type '%s'", type, mp_obj_get_type_str(arg))); } } @@ -1363,7 +1371,7 @@ STATIC vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *ar terse_str_format_value_error(); } else { 
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError, - "unknown format code '%c' for object of type 'str'", + "unknown format code '%c' for object of type '%s'", type, mp_obj_get_type_str(arg))); } } @@ -1374,21 +1382,22 @@ STATIC vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *ar } mp_obj_t mp_obj_str_format(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(args[0])); + mp_check_self(mp_obj_is_str_or_bytes(args[0])); GET_STR_DATA_LEN(args[0], str, len); int arg_i = 0; vstr_t vstr = mp_obj_str_format_helper((const char*)str, (const char*)str + len, &arg_i, n_args, args, kwargs); - return mp_obj_new_str_from_vstr(&mp_type_str, &vstr); + return mp_obj_new_str_from_vstr(mp_obj_get_type(args[0]), &vstr); } MP_DEFINE_CONST_FUN_OBJ_KW(str_format_obj, 1, mp_obj_str_format); +#if MICROPY_PY_BUILTINS_STR_OP_MODULO STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_t *args, mp_obj_t dict) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(pattern)); + mp_check_self(mp_obj_is_str_or_bytes(pattern)); GET_STR_DATA_LEN(pattern, str, len); const byte *start_str = str; - bool is_bytes = MP_OBJ_IS_TYPE(pattern, &mp_type_bytes); + bool is_bytes = mp_obj_is_type(pattern, &mp_type_bytes); size_t arg_i = 0; vstr_t vstr; mp_print_t print; @@ -1411,7 +1420,7 @@ STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_ // Dictionary value lookup if (*str == '(') { if (dict == MP_OBJ_NULL) { - mp_raise_TypeError("format requires a dict"); + mp_raise_TypeError("format needs a dict"); } arg_i = 1; // we used up the single dict argument const byte *key = ++str; @@ -1486,24 +1495,24 @@ incomplete_format: if (arg == MP_OBJ_NULL) { if (arg_i >= n_args) { not_enough_args: - mp_raise_TypeError("not enough arguments for format string"); + mp_raise_TypeError("format string needs more arguments"); } arg = args[arg_i++]; } switch (*str) { case 'c': - if (MP_OBJ_IS_STR(arg)) { + if (mp_obj_is_str(arg)) { size_t slen; const char *s = mp_obj_str_get_data(arg, &slen); if (slen != 1) { - mp_raise_TypeError("%%c requires int or char"); + mp_raise_TypeError("%%c needs int or char"); } mp_print_strn(&print, s, 1, flags, ' ', width); } else if (arg_looks_integer(arg)) { char ch = mp_obj_get_int(arg); mp_print_strn(&print, &ch, 1, flags, ' ', width); } else { - mp_raise_TypeError("integer required"); + mp_raise_TypeError("integer needed"); } break; @@ -1538,7 +1547,7 @@ not_enough_args: mp_print_t arg_print; vstr_init_print(&arg_vstr, 16, &arg_print); mp_print_kind_t print_kind = (*str == 'r' ? PRINT_REPR : PRINT_STR); - if (print_kind == PRINT_STR && is_bytes && MP_OBJ_IS_TYPE(arg, &mp_type_bytes)) { + if (print_kind == PRINT_STR && is_bytes && mp_obj_is_type(arg, &mp_type_bytes)) { // If we have something like b"%s" % b"1", bytes arg should be // printed undecorated. print_kind = PRINT_RAW; @@ -1573,16 +1582,17 @@ not_enough_args: } if (arg_i != n_args) { - mp_raise_TypeError("not all arguments converted during string formatting"); + mp_raise_TypeError("format string didn't convert all arguments"); } return mp_obj_new_str_from_vstr(is_bytes ? &mp_type_bytes : &mp_type_str, &vstr); } +#endif // The implementation is optimized, returning the original string if there's // nothing to replace. 
STATIC mp_obj_t str_replace(size_t n_args, const mp_obj_t *args) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(args[0])); + mp_check_self(mp_obj_is_str_or_bytes(args[0])); mp_int_t max_rep = -1; if (n_args == 4) { @@ -1687,9 +1697,10 @@ STATIC mp_obj_t str_replace(size_t n_args, const mp_obj_t *args) { } MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_replace_obj, 3, 4, str_replace); +#if MICROPY_PY_BUILTINS_STR_COUNT STATIC mp_obj_t str_count(size_t n_args, const mp_obj_t *args) { const mp_obj_type_t *self_type = mp_obj_get_type(args[0]); - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(args[0])); + mp_check_self(mp_obj_is_str_or_bytes(args[0])); // check argument type if (mp_obj_get_type(args[1]) != self_type) { @@ -1727,10 +1738,11 @@ STATIC mp_obj_t str_count(size_t n_args, const mp_obj_t *args) { return MP_OBJ_NEW_SMALL_INT(num_occurrences); } MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_count_obj, 2, 4, str_count); +#endif #if MICROPY_PY_BUILTINS_STR_PARTITION STATIC mp_obj_t str_partitioner(mp_obj_t self_in, mp_obj_t arg, int direction) { - mp_check_self(MP_OBJ_IS_STR_OR_BYTES(self_in)); + mp_check_self(mp_obj_is_str_or_bytes(self_in)); mp_obj_type_t *self_type = mp_obj_get_type(self_in); if (self_type != mp_obj_get_type(arg)) { bad_implicit_conversion(arg); @@ -1937,7 +1949,9 @@ STATIC const mp_rom_map_elem_t str8_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) }, { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) }, { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) }, + #if MICROPY_PY_BUILTINS_STR_COUNT { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) }, + #endif #if MICROPY_PY_BUILTINS_STR_PARTITION { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) }, { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) }, @@ -2078,7 +2092,7 @@ mp_obj_t mp_obj_new_bytes(const byte* data, size_t len) { } bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2) { - if (MP_OBJ_IS_QSTR(s1) && MP_OBJ_IS_QSTR(s2)) { + if (mp_obj_is_qstr(s1) && mp_obj_is_qstr(s2)) { return s1 == s2; } else { GET_STR_HASH(s1, h1); @@ -2110,9 +2124,9 @@ STATIC NORETURN void bad_implicit_conversion(mp_obj_t self_in) { // use this if you will anyway convert the string to a qstr // will be more efficient for the case where it's already a qstr qstr mp_obj_str_get_qstr(mp_obj_t self_in) { - if (MP_OBJ_IS_QSTR(self_in)) { + if (mp_obj_is_qstr(self_in)) { return MP_OBJ_QSTR_VALUE(self_in); - } else if (MP_OBJ_IS_TYPE(self_in, &mp_type_str)) { + } else if (mp_obj_is_type(self_in, &mp_type_str)) { mp_obj_str_t *self = MP_OBJ_TO_PTR(self_in); return qstr_from_strn((char*)self->data, self->len); } else { @@ -2123,7 +2137,7 @@ qstr mp_obj_str_get_qstr(mp_obj_t self_in) { // only use this function if you need the str data to be zero terminated // at the moment all strings are zero terminated to help with C ASCIIZ compatibility const char *mp_obj_str_get_str(mp_obj_t self_in) { - if (MP_OBJ_IS_STR_OR_BYTES(self_in)) { + if (mp_obj_is_str_or_bytes(self_in)) { GET_STR_DATA_LEN(self_in, s, l); (void)l; // len unused return (const char*)s; @@ -2133,7 +2147,7 @@ const char *mp_obj_str_get_str(mp_obj_t self_in) { } const char *mp_obj_str_get_data(mp_obj_t self_in, size_t *len) { - if (MP_OBJ_IS_STR_OR_BYTES(self_in)) { + if (mp_obj_is_str_or_bytes(self_in)) { GET_STR_DATA_LEN(self_in, s, l); *len = l; return (const char*)s; @@ -2144,7 +2158,7 @@ const char *mp_obj_str_get_data(mp_obj_t self_in, size_t *len) { #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C const byte 
*mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len) { - if (MP_OBJ_IS_QSTR(self_in)) { + if (mp_obj_is_qstr(self_in)) { return qstr_data(MP_OBJ_QSTR_VALUE(self_in), len); } else { *len = ((mp_obj_str_t*)self_in)->len; diff --git a/python/src/py/objstr.h b/python/src/py/objstr.h index 4e55cad09..15ed7a225 100644 --- a/python/src/py/objstr.h +++ b/python/src/py/objstr.h @@ -41,12 +41,12 @@ typedef struct _mp_obj_str_t { // use this macro to extract the string hash // warning: the hash can be 0, meaning invalid, and must then be explicitly computed from the data #define GET_STR_HASH(str_obj_in, str_hash) \ - mp_uint_t str_hash; if (MP_OBJ_IS_QSTR(str_obj_in)) \ + mp_uint_t str_hash; if (mp_obj_is_qstr(str_obj_in)) \ { str_hash = qstr_hash(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_hash = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->hash; } // use this macro to extract the string length #define GET_STR_LEN(str_obj_in, str_len) \ - size_t str_len; if (MP_OBJ_IS_QSTR(str_obj_in)) \ + size_t str_len; if (mp_obj_is_qstr(str_obj_in)) \ { str_len = qstr_len(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_len = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->len; } // use this macro to extract the string data and length @@ -56,7 +56,7 @@ const byte *mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len); size_t str_len; const byte *str_data = mp_obj_str_get_data_no_check(str_obj_in, &str_len); #else #define GET_STR_DATA_LEN(str_obj_in, str_data, str_len) \ - const byte *str_data; size_t str_len; if (MP_OBJ_IS_QSTR(str_obj_in)) \ + const byte *str_data; size_t str_len; if (mp_obj_is_qstr(str_obj_in)) \ { str_data = qstr_data(MP_OBJ_QSTR_VALUE(str_obj_in), &str_len); } \ else { str_len = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->len; str_data = ((mp_obj_str_t*)MP_OBJ_TO_PTR(str_obj_in))->data; } #endif @@ -102,5 +102,6 @@ MP_DECLARE_CONST_FUN_OBJ_1(str_isalpha_obj); MP_DECLARE_CONST_FUN_OBJ_1(str_isdigit_obj); MP_DECLARE_CONST_FUN_OBJ_1(str_isupper_obj); MP_DECLARE_CONST_FUN_OBJ_1(str_islower_obj); +MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(bytes_decode_obj); #endif // MICROPY_INCLUDED_PY_OBJSTR_H diff --git a/python/src/py/objstringio.c b/python/src/py/objstringio.c index b405ee21e..89b679031 100644 --- a/python/src/py/objstringio.c +++ b/python/src/py/objstringio.c @@ -4,7 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014-2017 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -194,12 +194,12 @@ STATIC mp_obj_t stringio_make_new(const mp_obj_type_t *type_in, size_t n_args, s mp_obj_stringio_t *o = stringio_new(type_in); if (n_args > 0) { - if (MP_OBJ_IS_INT(args[0])) { + if (mp_obj_is_int(args[0])) { sz = mp_obj_get_int(args[0]); } else { mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ); - if (MP_OBJ_IS_STR_OR_BYTES(args[0])) { + if (mp_obj_is_str_or_bytes(args[0])) { o->vstr = m_new_obj(vstr_t); vstr_init_fixed_buf(o->vstr, bufinfo.len, bufinfo.buf); o->vstr->len = bufinfo.len; diff --git a/python/src/py/objstrunicode.c b/python/src/py/objstrunicode.c index badb569d7..78d0b5006 100644 --- a/python/src/py/objstrunicode.c +++ b/python/src/py/objstrunicode.c @@ -4,7 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014-2016 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -126,7 +126,7 @@ const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, s // Copied from mp_get_index; I don't want bounds checking, just give me // the integer as-is. (I can't bounds-check without scanning the whole // string; an out-of-bounds index will be caught in the loops below.) - if (MP_OBJ_IS_SMALL_INT(index)) { + if (mp_obj_is_small_int(index)) { i = MP_OBJ_SMALL_INT_VALUE(index); } else if (!mp_obj_get_int_maybe(index, &i)) { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, "string indices must be integers, not %s", mp_obj_get_type_str(index))); @@ -182,7 +182,7 @@ STATIC mp_obj_t str_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { if (value == MP_OBJ_SENTINEL) { // load #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_obj_t ostart, ostop, ostep; mp_obj_slice_get(index, &ostart, &ostop, &ostep); if (ostep != mp_const_none && ostep != MP_OBJ_NEW_SMALL_INT(1)) { @@ -243,7 +243,9 @@ STATIC const mp_rom_map_elem_t struni_locals_dict_table[] = { { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) }, { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) }, { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) }, + #if MICROPY_PY_BUILTINS_STR_COUNT { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) }, + #endif #if MICROPY_PY_BUILTINS_STR_PARTITION { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) }, { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) }, diff --git a/python/src/py/objtuple.c b/python/src/py/objtuple.c index 34b7664eb..39eb94023 100644 --- a/python/src/py/objtuple.c +++ b/python/src/py/objtuple.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. 
George + * Copyright (c) 2014-2017 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -70,7 +71,7 @@ STATIC mp_obj_t mp_obj_tuple_make_new(const mp_obj_type_t *type_in, size_t n_arg case 1: default: { // 1 argument, an iterable from which we make a new tuple - if (MP_OBJ_IS_TYPE(args[0], &mp_type_tuple)) { + if (mp_obj_is_type(args[0], &mp_type_tuple)) { return args[0]; } @@ -179,7 +180,7 @@ mp_obj_t mp_obj_tuple_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { // load mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in); #if MICROPY_PY_BUILTINS_SLICE - if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) { + if (mp_obj_is_type(index, &mp_type_slice)) { mp_bound_slice_t slice; if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) { mp_raise_NotImplementedError("only slices with step=1 (aka None) are supported"); @@ -197,14 +198,14 @@ mp_obj_t mp_obj_tuple_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { } STATIC mp_obj_t tuple_count(mp_obj_t self_in, mp_obj_t value) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_tuple)); mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in); return mp_seq_count_obj(self->items, self->len, value); } STATIC MP_DEFINE_CONST_FUN_OBJ_2(tuple_count_obj, tuple_count); STATIC mp_obj_t tuple_index(size_t n_args, const mp_obj_t *args) { - mp_check_self(MP_OBJ_IS_TYPE(args[0], &mp_type_tuple)); + mp_check_self(mp_obj_is_type(args[0], &mp_type_tuple)); mp_obj_tuple_t *self = MP_OBJ_TO_PTR(args[0]); return mp_seq_index_obj(self->items, self->len, n_args, args); } @@ -248,14 +249,14 @@ mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items) { } void mp_obj_tuple_get(mp_obj_t self_in, size_t *len, mp_obj_t **items) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple)); + assert(mp_obj_is_type(self_in, &mp_type_tuple)); mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in); *len = self->len; *items = &self->items[0]; } void mp_obj_tuple_del(mp_obj_t self_in) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_tuple)); + assert(mp_obj_is_type(self_in, &mp_type_tuple)); mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in); m_del_var(mp_obj_tuple_t, mp_obj_t, self->len, self); } diff --git a/python/src/py/objtype.c b/python/src/py/objtype.c index 7df349ce9..3e65a32f0 100644 --- a/python/src/py/objtype.c +++ b/python/src/py/objtype.c @@ -3,8 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George - * Copyright (c) 2014-2016 Paul Sokolovsky + * Copyright (c) 2013-2018 Damien P. George + * Copyright (c) 2014-2018 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -41,6 +41,12 @@ #define DEBUG_printf(...) 
(void)0 #endif +#define ENABLE_SPECIAL_ACCESSORS \ + (MICROPY_PY_DESCRIPTORS || MICROPY_PY_DELATTR_SETATTR || MICROPY_PY_BUILTINS_PROPERTY) + +#define TYPE_FLAG_IS_SUBCLASSED (0x0001) +#define TYPE_FLAG_HAS_SPECIAL_ACCESSORS (0x0002) + STATIC mp_obj_t static_class_method_make_new(const mp_obj_type_t *self_in, size_t n_args, size_t n_kw, const mp_obj_t *args); /******************************************************************************/ @@ -66,7 +72,7 @@ STATIC int instance_count_native_bases(const mp_obj_type_t *type, const mp_obj_t const mp_obj_t *item = parent_tuple->items; const mp_obj_t *top = item + parent_tuple->len; for (; item < top; ++item) { - assert(MP_OBJ_IS_TYPE(*item, &mp_type_type)); + assert(mp_obj_is_type(*item, &mp_type_type)); const mp_obj_type_t *bt = (const mp_obj_type_t *)MP_OBJ_TO_PTR(*item); count += instance_count_native_bases(bt, last_native_base); } @@ -171,10 +177,13 @@ STATIC void mp_obj_class_lookup(struct class_lookup_data *lookup, const mp_obj_ mp_convert_member_lookup(obj_obj, type, elem->value, lookup->dest); } #if DEBUG_PRINT - printf("mp_obj_class_lookup: Returning: "); - mp_obj_print(lookup->dest[0], PRINT_REPR); printf(" "); - // Don't try to repr() lookup->dest[1], as we can be called recursively - printf("<%s @%p>\n", mp_obj_get_type_str(lookup->dest[1]), lookup->dest[1]); + DEBUG_printf("mp_obj_class_lookup: Returning: "); + mp_obj_print_helper(MICROPY_DEBUG_PRINTER, lookup->dest[0], PRINT_REPR); + if (lookup->dest[1] != MP_OBJ_NULL) { + // Don't try to repr() lookup->dest[1], as we can be called recursively + DEBUG_printf(" <%s @%p>", mp_obj_get_type_str(lookup->dest[1]), MP_OBJ_TO_PTR(lookup->dest[1])); + } + DEBUG_printf("\n"); #endif return; } @@ -201,7 +210,7 @@ STATIC void mp_obj_class_lookup(struct class_lookup_data *lookup, const mp_obj_ const mp_obj_t *item = parent_tuple->items; const mp_obj_t *top = item + parent_tuple->len - 1; for (; item < top; ++item) { - assert(MP_OBJ_IS_TYPE(*item, &mp_type_type)); + assert(mp_obj_is_type(*item, &mp_type_type)); mp_obj_type_t *bt = (mp_obj_type_t*)MP_OBJ_TO_PTR(*item); if (bt == &mp_type_object) { // Not a "real" type @@ -214,7 +223,7 @@ STATIC void mp_obj_class_lookup(struct class_lookup_data *lookup, const mp_obj_ } // search last base (simple tail recursion elimination) - assert(MP_OBJ_IS_TYPE(*item, &mp_type_type)); + assert(mp_obj_is_type(*item, &mp_type_type)); type = (mp_obj_type_t*)MP_OBJ_TO_PTR(*item); #endif } else { @@ -367,6 +376,7 @@ const byte mp_unary_op_method_name[MP_UNARY_OP_NUM_RUNTIME] = { [MP_UNARY_OP_BOOL] = MP_QSTR___bool__, [MP_UNARY_OP_LEN] = MP_QSTR___len__, [MP_UNARY_OP_HASH] = MP_QSTR___hash__, + [MP_UNARY_OP_INT] = MP_QSTR___int__, #if MICROPY_PY_ALL_SPECIAL_METHODS [MP_UNARY_OP_POSITIVE] = MP_QSTR___pos__, [MP_UNARY_OP_NEGATIVE] = MP_QSTR___neg__, @@ -412,9 +422,21 @@ STATIC mp_obj_t instance_unary_op(mp_unary_op_t op, mp_obj_t self_in) { return mp_unary_op(op, self->subobj[0]); } else if (member[0] != MP_OBJ_NULL) { mp_obj_t val = mp_call_function_1(member[0], self_in); - // __hash__ must return a small int - if (op == MP_UNARY_OP_HASH) { - val = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int_truncated(val)); + + switch (op) { + case MP_UNARY_OP_HASH: + // __hash__ must return a small int + val = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int_truncated(val)); + break; + case MP_UNARY_OP_INT: + // Must return int + if (!mp_obj_is_int(val)) { + mp_raise_TypeError(NULL); + } + break; + default: + // No need to do anything + ; } return val; } else { @@ -451,8 +473,7 @@ const byte 
mp_binary_op_method_name[MP_BINARY_OP_NUM_RUNTIME] = { // MP_BINARY_OP_NOT_EQUAL, // a != b calls a == b and inverts result [MP_BINARY_OP_CONTAINS] = MP_QSTR___contains__, - // All inplace methods are optional, and normal methods will be used - // as a fallback. + // If an inplace method is not found a normal method will be used as a fallback [MP_BINARY_OP_INPLACE_ADD] = MP_QSTR___iadd__, [MP_BINARY_OP_INPLACE_SUBTRACT] = MP_QSTR___isub__, #if MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS @@ -562,7 +583,6 @@ STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *des mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP); if (elem != NULL) { // object member, always treated as a value - // TODO should we check for properties? dest[0] = elem->value; return; } @@ -574,7 +594,7 @@ STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *des mp_map_t *map = &self->members; mp_obj_t attr_dict = mp_obj_new_dict(map->used); for (size_t i = 0; i < map->alloc; ++i) { - if (MP_MAP_SLOT_IS_FILLED(map, i)) { + if (mp_map_slot_is_filled(map, i)) { mp_obj_dict_store(attr_dict, map->table[i].key, map->table[i].value); } } @@ -592,8 +612,13 @@ STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *des mp_obj_class_lookup(&lookup, self->base.type); mp_obj_t member = dest[0]; if (member != MP_OBJ_NULL) { + if (!(self->base.type->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) { + // Class doesn't have any special accessors to check so return straightaway + return; + } + #if MICROPY_PY_BUILTINS_PROPERTY - if (MP_OBJ_IS_TYPE(member, &mp_type_property)) { + if (mp_obj_is_type(member, &mp_type_property)) { // object member is a property; delegate the load to the property // Note: This is an optimisation for code size and execution time. // The proper way to do it is have the functionality just below @@ -642,7 +667,6 @@ STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *des mp_load_method_maybe(self_in, MP_QSTR___getattr__, dest2); if (dest2[0] != MP_OBJ_NULL) { // __getattr__ exists, call it and return its result - // XXX if this fails to load the requested attr, should we catch the attribute error and return silently? dest2[2] = MP_OBJ_NEW_QSTR(attr); dest[0] = mp_call_method_n_kw(1, 0, dest2); return; @@ -653,11 +677,15 @@ STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *des STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t value) { mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in); + if (!(self->base.type->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) { + // Class doesn't have any special accessors so skip their checks + goto skip_special_accessors; + } + #if MICROPY_PY_BUILTINS_PROPERTY || MICROPY_PY_DESCRIPTORS // With property and/or descriptors enabled we need to do a lookup // first in the class dict for the attribute to see if the store should // be delegated. - // Note: this makes all stores slow... how to fix? mp_obj_t member[2] = {MP_OBJ_NULL}; struct class_lookup_data lookup = { .obj = self, @@ -670,7 +698,7 @@ STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t val if (member[0] != MP_OBJ_NULL) { #if MICROPY_PY_BUILTINS_PROPERTY - if (MP_OBJ_IS_TYPE(member[0], &mp_type_property)) { + if (mp_obj_is_type(member[0], &mp_type_property)) { // attribute exists and is a property; delegate the store/delete // Note: This is an optimisation for code size and execution time. 
// The proper way to do it is have the functionality just below in @@ -729,9 +757,9 @@ STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t val } #endif + #if MICROPY_PY_DELATTR_SETATTR if (value == MP_OBJ_NULL) { // delete attribute - #if MICROPY_PY_DELATTR_SETATTR // try __delattr__ first mp_obj_t attr_delattr_method[3]; mp_load_method_maybe(self_in, MP_QSTR___delattr__, attr_delattr_method); @@ -741,13 +769,8 @@ STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t val mp_call_method_n_kw(1, 0, attr_delattr_method); return true; } - #endif - - mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND); - return elem != NULL; } else { // store attribute - #if MICROPY_PY_DELATTR_SETATTR // try __setattr__ first mp_obj_t attr_setattr_method[4]; mp_load_method_maybe(self_in, MP_QSTR___setattr__, attr_setattr_method); @@ -758,14 +781,23 @@ STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t val mp_call_method_n_kw(2, 0, attr_setattr_method); return true; } - #endif + } + #endif +skip_special_accessors: + + if (value == MP_OBJ_NULL) { + // delete attribute + mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND); + return elem != NULL; + } else { + // store attribute mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value; return true; } } -void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { +STATIC void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { if (dest[0] == MP_OBJ_NULL) { mp_obj_instance_load_attr(self_in, attr, dest); } else { @@ -777,36 +809,29 @@ void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { STATIC mp_obj_t instance_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) { mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in); - mp_obj_t member[2] = {MP_OBJ_NULL}; + mp_obj_t member[4] = {MP_OBJ_NULL, MP_OBJ_NULL, index, value}; struct class_lookup_data lookup = { .obj = self, .meth_offset = offsetof(mp_obj_type_t, subscr), .dest = member, .is_type = false, }; - size_t meth_args; if (value == MP_OBJ_NULL) { // delete item lookup.attr = MP_QSTR___delitem__; - mp_obj_class_lookup(&lookup, self->base.type); - meth_args = 2; } else if (value == MP_OBJ_SENTINEL) { // load item lookup.attr = MP_QSTR___getitem__; - mp_obj_class_lookup(&lookup, self->base.type); - meth_args = 2; } else { // store item lookup.attr = MP_QSTR___setitem__; - mp_obj_class_lookup(&lookup, self->base.type); - meth_args = 3; } + mp_obj_class_lookup(&lookup, self->base.type); if (member[0] == MP_OBJ_SENTINEL) { return mp_obj_subscr(self->subobj[0], index, value); } else if (member[0] != MP_OBJ_NULL) { - mp_obj_t args[3] = {self_in, index, value}; - // TODO probably need to call mp_convert_member_lookup, and use mp_call_method_n_kw - mp_obj_t ret = mp_call_function_n_kw(member[0], meth_args, 0, args); + size_t n_args = value == MP_OBJ_NULL || value == MP_OBJ_SENTINEL ? 
1 : 2; + mp_obj_t ret = mp_call_method_n_kw(n_args, 0, member); if (value == MP_OBJ_SENTINEL) { return ret; } else { @@ -843,7 +868,7 @@ mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, cons mp_raise_TypeError("object not callable"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not callable", mp_obj_get_type_str(self_in))); + "'%s' object isn't callable", mp_obj_get_type_str(self_in))); } } mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in); @@ -900,6 +925,34 @@ STATIC mp_int_t instance_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, // - there is a constant mp_obj_type_t (called mp_type_type) for the 'type' object // - creating a new class (a new type) creates a new mp_obj_type_t +#if ENABLE_SPECIAL_ACCESSORS +STATIC bool check_for_special_accessors(mp_obj_t key, mp_obj_t value) { + #if MICROPY_PY_DELATTR_SETATTR + if (key == MP_OBJ_NEW_QSTR(MP_QSTR___setattr__) || key == MP_OBJ_NEW_QSTR(MP_QSTR___delattr__)) { + return true; + } + #endif + #if MICROPY_PY_BUILTINS_PROPERTY + if (mp_obj_is_type(value, &mp_type_property)) { + return true; + } + #endif + #if MICROPY_PY_DESCRIPTORS + static const uint8_t to_check[] = { + MP_QSTR___get__, MP_QSTR___set__, MP_QSTR___delete__, + }; + for (size_t i = 0; i < MP_ARRAY_SIZE(to_check); ++i) { + mp_obj_t dest_temp[2]; + mp_load_method_protected(value, to_check[i], dest_temp, true); + if (dest_temp[0] != MP_OBJ_NULL) { + return true; + } + } + #endif + return false; +} +#endif + STATIC void type_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) { (void)kind; mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in); @@ -948,7 +1001,7 @@ STATIC mp_obj_t type_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp } STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_type)); + assert(mp_obj_is_type(self_in, &mp_type_type)); mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in); if (dest[0] == MP_OBJ_NULL) { @@ -970,8 +1023,6 @@ STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { } else { // delete/store attribute - // TODO CPython allows STORE_ATTR to a class, but is this the correct implementation? 
- if (self->locals_dict != NULL) { assert(self->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now mp_map_t *locals_map = &self->locals_dict->map; @@ -986,6 +1037,19 @@ STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { dest[0] = MP_OBJ_NULL; // indicate success } } else { + #if ENABLE_SPECIAL_ACCESSORS + // Check if we add any special accessor methods with this store + if (!(self->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) { + if (check_for_special_accessors(MP_OBJ_NEW_QSTR(attr), dest[1])) { + if (self->flags & TYPE_FLAG_IS_SUBCLASSED) { + // This class is already subclassed so can't have special accessors added + mp_raise_msg(&mp_type_AttributeError, "can't add special method to already-subclassed class"); + } + self->flags |= TYPE_FLAG_HAS_SPECIAL_ACCESSORS; + } + } + #endif + // store attribute mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND); elem->value = dest[1]; @@ -1006,31 +1070,46 @@ const mp_obj_type_t mp_type_type = { }; mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) { - assert(MP_OBJ_IS_TYPE(bases_tuple, &mp_type_tuple)); // MicroPython restriction, for now - assert(MP_OBJ_IS_TYPE(locals_dict, &mp_type_dict)); // MicroPython restriction, for now + // Verify input objects have expected type + if (!mp_obj_is_type(bases_tuple, &mp_type_tuple)) { + mp_raise_TypeError(NULL); + } + if (!mp_obj_is_type(locals_dict, &mp_type_dict)) { + mp_raise_TypeError(NULL); + } // TODO might need to make a copy of locals_dict; at least that's how CPython does it // Basic validation of base classes + uint16_t base_flags = 0; size_t bases_len; mp_obj_t *bases_items; mp_obj_tuple_get(bases_tuple, &bases_len, &bases_items); for (size_t i = 0; i < bases_len; i++) { - assert(MP_OBJ_IS_TYPE(bases_items[i], &mp_type_type)); + if (!mp_obj_is_type(bases_items[i], &mp_type_type)) { + mp_raise_TypeError(NULL); + } mp_obj_type_t *t = MP_OBJ_TO_PTR(bases_items[i]); // TODO: Verify with CPy, tested on function type if (t->make_new == NULL) { if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("type is not an acceptable base type"); + mp_raise_TypeError("type isn't an acceptable base type"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "type '%q' is not an acceptable base type", t->name)); + "type '%q' isn't an acceptable base type", t->name)); } } + #if ENABLE_SPECIAL_ACCESSORS + if (mp_obj_is_instance_type(t)) { + t->flags |= TYPE_FLAG_IS_SUBCLASSED; + base_flags |= t->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS; + } + #endif } mp_obj_type_t *o = m_new0(mp_obj_type_t, 1); o->base.type = &mp_type_type; + o->flags = base_flags; o->name = name; o->print = instance_print; o->make_new = mp_obj_instance_make_new; @@ -1063,6 +1142,21 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) o->locals_dict = MP_OBJ_TO_PTR(locals_dict); + #if ENABLE_SPECIAL_ACCESSORS + // Check if the class has any special accessor methods + if (!(o->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) { + for (size_t i = 0; i < o->locals_dict->map.alloc; i++) { + if (mp_map_slot_is_filled(&o->locals_dict->map, i)) { + const mp_map_elem_t *elem = &o->locals_dict->map.table[i]; + if (check_for_special_accessors(elem->key, elem->value)) { + o->flags |= TYPE_FLAG_HAS_SPECIAL_ACCESSORS; + break; + } + } + } + } + #endif + const mp_obj_type_t *native_base; size_t num_native_bases = instance_count_native_bases(o, &native_base); if 
(num_native_bases > 1) { @@ -1073,7 +1167,7 @@ mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(MP_QSTR___new__), MP_MAP_LOOKUP); if (elem != NULL) { // __new__ slot exists; check if it is a function - if (MP_OBJ_IS_FUN(elem->value)) { + if (mp_obj_is_fun(elem->value)) { // __new__ is a function, wrap it in a staticmethod decorator elem->value = static_class_method_make_new(&mp_type_staticmethod, 1, 0, &elem->value); } @@ -1106,6 +1200,9 @@ STATIC mp_obj_t super_make_new(const mp_obj_type_t *type_in, size_t n_args, size // 0 arguments are turned into 2 in the compiler // 1 argument is not yet implemented mp_arg_check_num(n_args, n_kw, 2, 2, false); + if (!mp_obj_is_type(args[0], &mp_type_type)) { + mp_raise_TypeError(NULL); + } mp_obj_super_t *o = m_new_obj(mp_obj_super_t); *o = (mp_obj_super_t){{type_in}, args[0], args[1]}; return MP_OBJ_FROM_PTR(o); @@ -1117,10 +1214,10 @@ STATIC void super_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { return; } - assert(MP_OBJ_IS_TYPE(self_in, &mp_type_super)); + assert(mp_obj_is_type(self_in, &mp_type_super)); mp_obj_super_t *self = MP_OBJ_TO_PTR(self_in); - assert(MP_OBJ_IS_TYPE(self->type, &mp_type_type)); + assert(mp_obj_is_type(self->type, &mp_type_type)); mp_obj_type_t *type = MP_OBJ_TO_PTR(self->type); @@ -1145,7 +1242,7 @@ STATIC void super_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) { size_t len = parent_tuple->len; const mp_obj_t *items = parent_tuple->items; for (size_t i = 0; i < len; i++) { - assert(MP_OBJ_IS_TYPE(items[i], &mp_type_type)); + assert(mp_obj_is_type(items[i], &mp_type_type)); if (MP_OBJ_TO_PTR(items[i]) == &mp_type_object) { // The "object" type will be searched at the end of this function, // and we don't want to lookup native methods in object. 
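Taken together, the objtype.c changes above implement one optimisation: scan a class's locals once, at type-creation time, for anything that alters attribute access (the names __setattr__/__delattr__, values of type property, descriptors with __get__/__set__/__delete__), record the result in TYPE_FLAG_HAS_SPECIAL_ACCESSORS, and let every later instance load/store skip the class-dict lookup when the flag is clear. Subclasses inherit the flag via base_flags, which is why a base marked TYPE_FLAG_IS_SUBCLASSED may not gain special accessors afterwards. The following is a minimal standalone sketch of the pattern, not MicroPython source: the my_* names are hypothetical, and it keys on names only, whereas the real check_for_special_accessors also inspects the stored values.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TYPE_FLAG_HAS_SPECIAL_ACCESSORS (0x0004)

typedef struct {
    const char *name;
    unsigned flags;
    const char *locals[8]; // names bound in the class body, NULL-terminated
} my_type_t;

// A name is "special" if storing it changes attribute-access semantics
static bool is_special_accessor(const char *name) {
    return strcmp(name, "__setattr__") == 0
        || strcmp(name, "__delattr__") == 0
        || strcmp(name, "__get__") == 0
        || strcmp(name, "__set__") == 0
        || strcmp(name, "__delete__") == 0;
}

// Done once, when the class is created (cf. the scan in mp_obj_new_type)
static void my_type_init_flags(my_type_t *t) {
    for (size_t i = 0; t->locals[i] != NULL; ++i) {
        if (is_special_accessor(t->locals[i])) {
            t->flags |= TYPE_FLAG_HAS_SPECIAL_ACCESSORS;
            break;
        }
    }
}

// Done on every instance.attr = value
static void my_store_attr(const my_type_t *t, const char *attr) {
    if (!(t->flags & TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) {
        // Common case: plain member-dict store, no class lookup at all
        printf("%s.%s stored via fast path\n", t->name, attr);
        return;
    }
    // Slow path: search the class for a property/descriptor/__setattr__
    printf("%s.%s stored via class lookup\n", t->name, attr);
}

int main(void) {
    my_type_t plain = {"Plain", 0, {"update", NULL}};
    my_type_t fancy = {"Fancy", 0, {"update", "__setattr__", NULL}};
    my_type_init_flags(&plain);
    my_type_init_flags(&fancy);
    my_store_attr(&plain, "x"); // fast
    my_store_attr(&fancy, "x"); // slow
    return 0;
}

The flag is computed once per type, so the common case (classes with no properties or descriptors) pays nothing per store, which is what the removed "this makes all stores slow... how to fix?" TODO was asking for.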
@@ -1204,7 +1301,7 @@ bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo) { // not equivalent classes, keep searching base classes // object should always be a type object, but just return false if it's not - if (!MP_OBJ_IS_TYPE(object, &mp_type_type)) { + if (!mp_obj_is_type(object, &mp_type_type)) { return false; } @@ -1240,10 +1337,10 @@ bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo) { STATIC mp_obj_t mp_obj_is_subclass(mp_obj_t object, mp_obj_t classinfo) { size_t len; mp_obj_t *items; - if (MP_OBJ_IS_TYPE(classinfo, &mp_type_type)) { + if (mp_obj_is_type(classinfo, &mp_type_type)) { len = 1; items = &classinfo; - } else if (MP_OBJ_IS_TYPE(classinfo, &mp_type_tuple)) { + } else if (mp_obj_is_type(classinfo, &mp_type_tuple)) { mp_obj_tuple_get(classinfo, &len, &items); } else { mp_raise_TypeError("issubclass() arg 2 must be a class or a tuple of classes"); @@ -1259,7 +1356,7 @@ STATIC mp_obj_t mp_obj_is_subclass(mp_obj_t object, mp_obj_t classinfo) { } STATIC mp_obj_t mp_builtin_issubclass(mp_obj_t object, mp_obj_t classinfo) { - if (!MP_OBJ_IS_TYPE(object, &mp_type_type)) { + if (!mp_obj_is_type(object, &mp_type_type)) { mp_raise_TypeError("issubclass() arg 1 must be a class"); } return mp_obj_is_subclass(object, classinfo); diff --git a/python/src/py/objtype.h b/python/src/py/objtype.h index 1f4313084..3fc8c6e1b 100644 --- a/python/src/py/objtype.h +++ b/python/src/py/objtype.h @@ -42,9 +42,6 @@ typedef struct _mp_obj_instance_t { mp_obj_instance_t *mp_obj_new_instance(const mp_obj_type_t *cls, const mp_obj_type_t **native_base); #endif -// this needs to be exposed for MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE to work -void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest); - // these need to be exposed so mp_obj_is_callable can work correctly bool mp_obj_instance_is_callable(mp_obj_t self_in); mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args); diff --git a/python/src/py/objzip.c b/python/src/py/objzip.c index 0183925e3..4abc917c3 100644 --- a/python/src/py/objzip.c +++ b/python/src/py/objzip.c @@ -49,7 +49,7 @@ STATIC mp_obj_t zip_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_ } STATIC mp_obj_t zip_iternext(mp_obj_t self_in) { - mp_check_self(MP_OBJ_IS_TYPE(self_in, &mp_type_zip)); + mp_check_self(mp_obj_is_type(self_in, &mp_type_zip)); mp_obj_zip_t *self = MP_OBJ_TO_PTR(self_in); if (self->n_iters == 0) { return MP_OBJ_STOP_ITERATION; diff --git a/python/src/py/parse.c b/python/src/py/parse.c index 8c1286492..66110e7c3 100644 --- a/python/src/py/parse.c +++ b/python/src/py/parse.c @@ -338,7 +338,7 @@ bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o) { #else *o = (mp_obj_t)pns->nodes[0]; #endif - return MP_OBJ_IS_INT(*o); + return mp_obj_is_int(*o); } else { return false; } @@ -477,7 +477,7 @@ STATIC void push_result_token(parser_t *parser, uint8_t rule_id) { mp_map_elem_t *elem; if (rule_id == RULE_atom && (elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP)) != NULL) { - if (MP_OBJ_IS_SMALL_INT(elem->value)) { + if (mp_obj_is_small_int(elem->value)) { pn = mp_parse_node_new_small_int_checked(parser, elem->value); } else { pn = make_node_const_object(parser, lex->tok_line, elem->value); @@ -491,7 +491,7 @@ STATIC void push_result_token(parser_t *parser, uint8_t rule_id) { #endif } else if (lex->tok_kind == MP_TOKEN_INTEGER) { mp_obj_t o = mp_parse_num_integer(lex->vstr.buf, lex->vstr.len, 0, lex); - if 
(MP_OBJ_IS_SMALL_INT(o)) { + if (mp_obj_is_small_int(o)) { pn = mp_parse_node_new_small_int_checked(parser, o); } else { pn = make_node_const_object(parser, lex->tok_line, o); @@ -768,7 +768,7 @@ STATIC bool fold_constants(parser_t *parser, uint8_t rule_id, size_t num_args) { } mp_obj_t dest[2]; mp_load_method_maybe(elem->value, q_attr, dest); - if (!(dest[0] != MP_OBJ_NULL && MP_OBJ_IS_INT(dest[0]) && dest[1] == MP_OBJ_NULL)) { + if (!(dest[0] != MP_OBJ_NULL && mp_obj_is_int(dest[0]) && dest[1] == MP_OBJ_NULL)) { return false; } arg0 = dest[0]; @@ -783,7 +783,7 @@ STATIC bool fold_constants(parser_t *parser, uint8_t rule_id, size_t num_args) { for (size_t i = num_args; i > 0; i--) { pop_result(parser); } - if (MP_OBJ_IS_SMALL_INT(arg0)) { + if (mp_obj_is_small_int(arg0)) { push_result_node(parser, mp_parse_node_new_small_int_checked(parser, arg0)); } else { // TODO reuse memory for parse node struct? @@ -1145,7 +1145,7 @@ mp_parse_tree_t mp_parse(mp_lexer_t *lex, mp_parse_input_kind_t input_kind) { "unexpected indent"); } else if (lex->tok_kind == MP_TOKEN_DEDENT_MISMATCH) { exc = mp_obj_new_exception_msg(&mp_type_IndentationError, - "unindent does not match any outer indentation level"); + "unindent doesn't match any outer indent level"); } else { exc = mp_obj_new_exception_msg(&mp_type_SyntaxError, "invalid syntax"); diff --git a/python/src/py/parsenum.c b/python/src/py/parsenum.c index 74cb25fbc..ae9b83419 100644 --- a/python/src/py/parsenum.c +++ b/python/src/py/parsenum.c @@ -83,6 +83,8 @@ mp_obj_t mp_parse_num_integer(const char *restrict str_, size_t len, int base, m mp_uint_t dig = *str; if ('0' <= dig && dig <= '9') { dig -= '0'; + } else if (dig == '_') { + continue; } else { dig |= 0x20; // make digit lower-case if ('a' <= dig && dig <= 'z') { @@ -233,14 +235,19 @@ mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool // string should be a decimal number parse_dec_in_t in = PARSE_DEC_IN_INTG; bool exp_neg = false; - mp_int_t exp_val = 0; - mp_int_t exp_extra = 0; + int exp_val = 0; + int exp_extra = 0; while (str < top) { - mp_uint_t dig = *str++; + unsigned int dig = *str++; if ('0' <= dig && dig <= '9') { dig -= '0'; if (in == PARSE_DEC_IN_EXP) { - exp_val = 10 * exp_val + dig; + // don't overflow exp_val when adding next digit, instead just truncate + // it and the resulting float will still be correct, either inf or 0.0 + // (use INT_MAX/2 to allow adding exp_extra at the end without overflow) + if (exp_val < (INT_MAX / 2 - 9) / 10) { + exp_val = 10 * exp_val + dig; + } } else { if (dec_val < DEC_VAL_MAX) { // dec_val won't overflow so keep accumulating @@ -274,6 +281,8 @@ mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool } else if (allow_imag && (dig | 0x20) == 'j') { imag = true; break; + } else if (dig == '_') { + continue; } else { // unknown character str--; @@ -292,15 +301,16 @@ mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool exp_val -= SMALL_NORMAL_EXP; dec_val *= SMALL_NORMAL_VAL; } + // At this point, we need to multiply the mantissa by its base 10 exponent. If possible, // we would rather manipulate numbers that have an exact representation in IEEE754. It // turns out small positive powers of 10 do, whereas small negative powers of 10 don't. // So in that case, we'll yield a division of exact values rather than a multiplication // of slightly erroneous values. 
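The comment above is the heart of this parsenum.c change: small positive powers of ten (up to EXACT_POWER_OF_10) are exactly representable in IEEE754, small negative ones are not, so dividing by an exact 10^k is more accurate than multiplying by an already-rounded 10^-k. The corrected comparison in the hunk just below (>= instead of >) also routes the boundary case exp_val == -EXACT_POWER_OF_10 through the exact-division branch. A small standalone experiment, using plain double in place of mp_float_t, makes the difference visible; it is an illustration, not part of the patch.

#include <math.h>
#include <stdio.h>

int main(void) {
    // Compare mantissa / 10^k (one correctly rounded division of exact
    // values, since 10^k is exact for small k) against mantissa * 10^-k
    // (an already-rounded constant fed into a second rounded operation).
    for (int k = 1; k <= 10; ++k) {
        double by_div = 7.0 / pow(10, k);
        double by_mul = 7.0 * pow(10, -k);
        printf("k=%2d: %s\n", k, by_div == by_mul ? "equal" : "differ in the last ulp");
    }
    return 0;
}

Which exponents disagree depends on the libm, but whenever the two forms differ, the divided form is the correctly rounded result.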
- if (exp_val < 0 && exp_val > -EXACT_POWER_OF_10) {
- dec_val /= MICROPY_FLOAT_C_FUN(pow)(10, -exp_val);
+ if (exp_val < 0 && exp_val >= -EXACT_POWER_OF_10) {
+ dec_val /= MICROPY_FLOAT_C_FUN(pow)(10, -exp_val);
} else {
- dec_val *= MICROPY_FLOAT_C_FUN(pow)(10, exp_val);
+ dec_val *= MICROPY_FLOAT_C_FUN(pow)(10, exp_val);
}
}
@@ -329,11 +339,13 @@ mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool
return mp_obj_new_complex(0, dec_val);
} else if (force_complex) {
return mp_obj_new_complex(dec_val, 0);
+ }
#else
if (imag || force_complex) {
raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError,
"complex values not supported"), lex);
+ }
#endif
- } else {
+ else {
return mp_obj_new_float(dec_val);
}
diff --git a/python/src/py/persistentcode.c b/python/src/py/persistentcode.c
index 7113b0dc2..cc6e161f4 100644
--- a/python/src/py/persistentcode.c
+++ b/python/src/py/persistentcode.c
@@ -38,10 +38,17 @@
#include "py/smallint.h"
-// The current version of .mpy files
-#define MPY_VERSION (3)
+#define QSTR_LAST_STATIC MP_QSTR_zip
-// The feature flags byte encodes the compile-time config options that
+// Macros to encode/decode flags to/from the feature byte
+#define MPY_FEATURE_ENCODE_FLAGS(flags) (flags)
+#define MPY_FEATURE_DECODE_FLAGS(feat) ((feat) & 3)
+
+// Macros to encode/decode native architecture to/from the feature byte
+#define MPY_FEATURE_ENCODE_ARCH(arch) ((arch) << 2)
+#define MPY_FEATURE_DECODE_ARCH(feat) ((feat) >> 2)
+
+// The feature flag bits encode the compile-time config options that
// affect the generated bytecode.
#define MPY_FEATURE_FLAGS ( \
((MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE) << 0) \
@@ -53,6 +60,27 @@
| ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \
)
+// Define the host architecture
+#if MICROPY_EMIT_X86
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_X86)
+#elif MICROPY_EMIT_X64
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_X64)
+#elif MICROPY_EMIT_THUMB
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV7M)
+#elif MICROPY_EMIT_ARM
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV6)
+#elif MICROPY_EMIT_XTENSA
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_XTENSA)
+#else
+#define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_NONE)
+#endif
+
+#if MICROPY_DYNAMIC_COMPILER
+#define MPY_FEATURE_ARCH_DYNAMIC mp_dynamic_compiler.native_arch
+#else
+#define MPY_FEATURE_ARCH_DYNAMIC MPY_FEATURE_ARCH
+#endif
+
#if MICROPY_PERSISTENT_CODE_LOAD || (MICROPY_PERSISTENT_CODE_SAVE && !MICROPY_DYNAMIC_COMPILER)
// The bytecode will depend on the number of bits in a small-int, and
// this function computes that (could make it a fixed constant, but it
@@ -68,6 +96,57 @@ STATIC int mp_small_int_bits(void) {
}
#endif
+#define QSTR_WINDOW_SIZE (32)
+
+typedef struct _qstr_window_t {
+ uint16_t idx; // indexes the head of the window
+ uint16_t window[QSTR_WINDOW_SIZE];
+} qstr_window_t;
+
+// Push a qstr to the head of the window, and the tail qstr is overwritten
+STATIC void qstr_window_push(qstr_window_t *qw, qstr qst) {
+ qw->idx = (qw->idx + 1) % QSTR_WINDOW_SIZE;
+ qw->window[qw->idx] = qst;
+}
+
+// Pull an existing qstr from within the window to the head of the window
+STATIC qstr qstr_window_pull(qstr_window_t *qw, size_t idx) {
+ qstr qst = qw->window[idx];
+ if (idx > qw->idx) {
+ memmove(&qw->window[idx], &qw->window[idx + 1], (QSTR_WINDOW_SIZE - idx - 1) * sizeof(uint16_t));
+ qw->window[QSTR_WINDOW_SIZE - 1] = qw->window[0];
+ idx = 0;
+ }
+ memmove(&qw->window[idx], &qw->window[idx + 1], (qw->idx - idx) * sizeof(uint16_t));
+ qw->window[qw->idx] = qst;
+ return qst;
+} + +#if MICROPY_PERSISTENT_CODE_LOAD + +// Access a qstr at the given index, relative to the head of the window (0=head) +STATIC qstr qstr_window_access(qstr_window_t *qw, size_t idx) { + return qstr_window_pull(qw, (qw->idx + QSTR_WINDOW_SIZE - idx) % QSTR_WINDOW_SIZE); +} + +#endif + +#if MICROPY_PERSISTENT_CODE_SAVE + +// Insert a qstr at the head of the window, either by pulling an existing one or pushing a new one +STATIC size_t qstr_window_insert(qstr_window_t *qw, qstr qst) { + for (size_t idx = 0; idx < QSTR_WINDOW_SIZE; ++idx) { + if (qw->window[idx] == qst) { + qstr_window_pull(qw, idx); + return (qw->idx + QSTR_WINDOW_SIZE - idx) % QSTR_WINDOW_SIZE; + } + } + qstr_window_push(qw, qst); + return QSTR_WINDOW_SIZE; +} + +#endif + typedef struct _bytecode_prelude_t { uint n_state; uint n_exc_stack; @@ -78,6 +157,8 @@ typedef struct _bytecode_prelude_t { uint code_info_size; } bytecode_prelude_t; +#if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_EMIT_NATIVE + // ip will point to start of opcodes // ip2 will point to simple_name, source_file qstrs STATIC void extract_prelude(const byte **ip, const byte **ip2, bytecode_prelude_t *prelude) { @@ -94,12 +175,50 @@ STATIC void extract_prelude(const byte **ip, const byte **ip2, bytecode_prelude_ } } +#endif + #endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE #if MICROPY_PERSISTENT_CODE_LOAD #include "py/parsenum.h" +#if MICROPY_EMIT_NATIVE + +#if MICROPY_EMIT_THUMB +STATIC void asm_thumb_rewrite_mov(uint8_t *pc, uint16_t val) { + // high part + *(uint16_t*)pc = (*(uint16_t*)pc & 0xfbf0) | (val >> 1 & 0x0400) | (val >> 12); + // low part + *(uint16_t*)(pc + 2) = (*(uint16_t*)(pc + 2) & 0x0f00) | (val << 4 & 0x7000) | (val & 0x00ff); + +} +#endif + +STATIC void arch_link_qstr(uint8_t *pc, bool is_obj, qstr qst) { + mp_uint_t val = qst; + if (is_obj) { + val = (mp_uint_t)MP_OBJ_NEW_QSTR(qst); + } + #if MICROPY_EMIT_X86 || MICROPY_EMIT_X64 || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA + pc[0] = val & 0xff; + pc[1] = (val >> 8) & 0xff; + pc[2] = (val >> 16) & 0xff; + pc[3] = (val >> 24) & 0xff; + #elif MICROPY_EMIT_THUMB + if (is_obj) { + // qstr object, movw and movt + asm_thumb_rewrite_mov(pc, val); // movw + asm_thumb_rewrite_mov(pc + 4, val >> 16); // movt + } else { + // qstr number, movw instruction + asm_thumb_rewrite_mov(pc, val); // movw + } + #endif +} + +#endif + STATIC int read_byte(mp_reader_t *reader) { return reader->readbyte(reader->data); } @@ -110,10 +229,14 @@ STATIC void read_bytes(mp_reader_t *reader, byte *buf, size_t len) { } } -STATIC size_t read_uint(mp_reader_t *reader) { +STATIC size_t read_uint(mp_reader_t *reader, byte **out) { size_t unum = 0; for (;;) { byte b = reader->readbyte(reader->data); + if (out != NULL) { + **out = b; + ++*out; + } unum = (unum << 7) | (b & 0x7f); if ((b & 0x80) == 0) { break; @@ -122,12 +245,22 @@ STATIC size_t read_uint(mp_reader_t *reader) { return unum; } -STATIC qstr load_qstr(mp_reader_t *reader) { - size_t len = read_uint(reader); +STATIC qstr load_qstr(mp_reader_t *reader, qstr_window_t *qw) { + size_t len = read_uint(reader, NULL); + if (len == 0) { + // static qstr + return read_byte(reader); + } + if (len & 1) { + // qstr in window + return qstr_window_access(qw, len >> 1); + } + len >>= 1; char *str = m_new(char, len); read_bytes(reader, (byte*)str, len); qstr qst = qstr_from_strn(str, len); m_del(char, str, len); + qstr_window_push(qw, qst); return qst; } @@ -136,7 +269,7 @@ STATIC mp_obj_t load_obj(mp_reader_t *reader) { if (obj_type == 'e') { return 
MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj); } else { - size_t len = read_uint(reader); + size_t len = read_uint(reader, NULL); vstr_t vstr; vstr_init_len(&vstr, len); read_bytes(reader, (byte*)vstr.buf, len); @@ -151,64 +284,193 @@ STATIC mp_obj_t load_obj(mp_reader_t *reader) { } } -STATIC void load_bytecode_qstrs(mp_reader_t *reader, byte *ip, byte *ip_top) { +STATIC void load_prelude(mp_reader_t *reader, byte **ip, byte **ip2, bytecode_prelude_t *prelude) { + prelude->n_state = read_uint(reader, ip); + prelude->n_exc_stack = read_uint(reader, ip); + read_bytes(reader, *ip, 4); + prelude->scope_flags = *(*ip)++; + prelude->n_pos_args = *(*ip)++; + prelude->n_kwonly_args = *(*ip)++; + prelude->n_def_pos_args = *(*ip)++; + *ip2 = *ip; + prelude->code_info_size = read_uint(reader, ip2); + read_bytes(reader, *ip2, prelude->code_info_size - (*ip2 - *ip)); + *ip += prelude->code_info_size; + while ((*(*ip)++ = read_byte(reader)) != 255) { + } +} + +STATIC void load_bytecode(mp_reader_t *reader, qstr_window_t *qw, byte *ip, byte *ip_top) { while (ip < ip_top) { + *ip = read_byte(reader); size_t sz; - uint f = mp_opcode_format(ip, &sz); + uint f = mp_opcode_format(ip, &sz, false); + ++ip; + --sz; if (f == MP_OPCODE_QSTR) { - qstr qst = load_qstr(reader); - ip[1] = qst; - ip[2] = qst >> 8; + qstr qst = load_qstr(reader, qw); + *ip++ = qst; + *ip++ = qst >> 8; + sz -= 2; + } else if (f == MP_OPCODE_VAR_UINT) { + while ((*ip++ = read_byte(reader)) & 0x80) { + } } + read_bytes(reader, ip, sz); ip += sz; } } -STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader) { - // load bytecode - size_t bc_len = read_uint(reader); - byte *bytecode = m_new(byte, bc_len); - read_bytes(reader, bytecode, bc_len); +STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader, qstr_window_t *qw) { + // Load function kind and data length + size_t kind_len = read_uint(reader, NULL); + int kind = (kind_len & 3) + MP_CODE_BYTECODE; + size_t fun_data_len = kind_len >> 2; - // extract prelude - const byte *ip = bytecode; - const byte *ip2; - bytecode_prelude_t prelude; - extract_prelude(&ip, &ip2, &prelude); - - // load qstrs and link global qstr ids into bytecode - qstr simple_name = load_qstr(reader); - qstr source_file = load_qstr(reader); - ((byte*)ip2)[0] = simple_name; ((byte*)ip2)[1] = simple_name >> 8; - ((byte*)ip2)[2] = source_file; ((byte*)ip2)[3] = source_file >> 8; - load_bytecode_qstrs(reader, (byte*)ip, bytecode + bc_len); - - // load constant table - size_t n_obj = read_uint(reader); - size_t n_raw_code = read_uint(reader); - mp_uint_t *const_table = m_new(mp_uint_t, prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code); - mp_uint_t *ct = const_table; - for (size_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { - *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader)); + #if !MICROPY_EMIT_NATIVE + if (kind != MP_CODE_BYTECODE) { + mp_raise_ValueError("incompatible .mpy file"); } - for (size_t i = 0; i < n_obj; ++i) { - *ct++ = (mp_uint_t)load_obj(reader); - } - for (size_t i = 0; i < n_raw_code; ++i) { - *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader); + #endif + + uint8_t *fun_data = NULL; + byte *ip2; + bytecode_prelude_t prelude = {0}; + #if MICROPY_EMIT_NATIVE + size_t prelude_offset; + mp_uint_t type_sig = 0; + size_t n_qstr_link = 0; + #endif + + if (kind == MP_CODE_BYTECODE) { + // Allocate memory for the bytecode + fun_data = m_new(uint8_t, fun_data_len); + + // Load prelude + byte *ip = fun_data; + load_prelude(reader, &ip, &ip2, &prelude); + + // Load bytecode + 
load_bytecode(reader, qw, ip, fun_data + fun_data_len); + + #if MICROPY_EMIT_NATIVE + } else { + // Allocate memory for native data and load it + size_t fun_alloc; + MP_PLAT_ALLOC_EXEC(fun_data_len, (void**)&fun_data, &fun_alloc); + read_bytes(reader, fun_data, fun_data_len); + + if (kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER) { + // Parse qstr link table and link native code + n_qstr_link = read_uint(reader, NULL); + for (size_t i = 0; i < n_qstr_link; ++i) { + size_t off = read_uint(reader, NULL); + qstr qst = load_qstr(reader, qw); + uint8_t *dest = fun_data + (off >> 2); + if ((off & 3) == 0) { + // Generic 16-bit link + dest[0] = qst & 0xff; + dest[1] = (qst >> 8) & 0xff; + } else { + // Architecture-specific link + arch_link_qstr(dest, (off & 3) == 2, qst); + } + } + } + + if (kind == MP_CODE_NATIVE_PY) { + // Extract prelude for later use + prelude_offset = read_uint(reader, NULL); + const byte *ip = fun_data + prelude_offset; + extract_prelude(&ip, (const byte**)&ip2, &prelude); + } else { + // Load basic scope info for viper and asm + prelude.scope_flags = read_uint(reader, NULL); + prelude.n_pos_args = 0; + prelude.n_kwonly_args = 0; + if (kind == MP_CODE_NATIVE_ASM) { + prelude.n_pos_args = read_uint(reader, NULL); + type_sig = read_uint(reader, NULL); + } + } + #endif } - // create raw_code and return it + if (kind == MP_CODE_BYTECODE || kind == MP_CODE_NATIVE_PY) { + // Load qstrs in prelude + qstr simple_name = load_qstr(reader, qw); + qstr source_file = load_qstr(reader, qw); + ip2[0] = simple_name; ip2[1] = simple_name >> 8; + ip2[2] = source_file; ip2[3] = source_file >> 8; + } + + mp_uint_t *const_table = NULL; + if (kind != MP_CODE_NATIVE_ASM) { + // Load constant table for bytecode, native and viper + + // Number of entries in constant table + size_t n_obj = read_uint(reader, NULL); + size_t n_raw_code = read_uint(reader, NULL); + + // Allocate constant table + size_t n_alloc = prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code; + if (kind != MP_CODE_BYTECODE) { + ++n_alloc; // additional entry for mp_fun_table + } + const_table = m_new(mp_uint_t, n_alloc); + mp_uint_t *ct = const_table; + + // Load function argument names (initial entries in const_table) + // (viper has n_pos_args=n_kwonly_args=0 so doesn't load any qstrs here) + for (size_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { + *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader, qw)); + } + + #if MICROPY_EMIT_NATIVE + if (kind != MP_CODE_BYTECODE) { + // Populate mp_fun_table entry + *ct++ = (mp_uint_t)(uintptr_t)mp_fun_table; + } + #endif + + // Load constant objects and raw code children + for (size_t i = 0; i < n_obj; ++i) { + *ct++ = (mp_uint_t)load_obj(reader); + } + for (size_t i = 0; i < n_raw_code; ++i) { + *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader, qw); + } + } + + // Create raw_code and return it mp_raw_code_t *rc = mp_emit_glue_new_raw_code(); - mp_emit_glue_assign_bytecode(rc, bytecode, - #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS - bc_len, + if (kind == MP_CODE_BYTECODE) { + mp_emit_glue_assign_bytecode(rc, fun_data, + #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS + fun_data_len, + #endif + const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + n_obj, n_raw_code, + #endif + prelude.scope_flags); + + #if MICROPY_EMIT_NATIVE + } else { + #if defined(MP_PLAT_COMMIT_EXEC) + fun_data = MP_PLAT_COMMIT_EXEC(fun_data, fun_data_len); #endif - const_table, - #if MICROPY_PERSISTENT_CODE_SAVE - n_obj, n_raw_code, - #endif - 
prelude.scope_flags); + + mp_emit_glue_assign_native(rc, kind, + fun_data, fun_data_len, const_table, + #if MICROPY_PERSISTENT_CODE_SAVE + prelude_offset, + n_obj, n_raw_code, + n_qstr_link, NULL, + #endif + prelude.n_pos_args, prelude.scope_flags, type_sig); + #endif + } return rc; } @@ -217,11 +479,18 @@ mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader) { read_bytes(reader, header, sizeof(header)); if (header[0] != 'M' || header[1] != MPY_VERSION - || header[2] != MPY_FEATURE_FLAGS - || header[3] > mp_small_int_bits()) { + || MPY_FEATURE_DECODE_FLAGS(header[2]) != MPY_FEATURE_FLAGS + || header[3] > mp_small_int_bits() + || read_uint(reader, NULL) > QSTR_WINDOW_SIZE) { mp_raise_ValueError("incompatible .mpy file"); } - mp_raw_code_t *rc = load_raw_code(reader); + if (MPY_FEATURE_DECODE_ARCH(header[2]) != MP_NATIVE_ARCH_NONE + && MPY_FEATURE_DECODE_ARCH(header[2]) != MPY_FEATURE_ARCH) { + mp_raise_ValueError("incompatible .mpy arch"); + } + qstr_window_t qw; + qw.idx = 0; + mp_raw_code_t *rc = load_raw_code(reader, &qw); reader->close(reader->data); return rc; } @@ -232,12 +501,16 @@ mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len) { return mp_raw_code_load(&reader); } +#if MICROPY_HAS_FILE_READER + mp_raw_code_t *mp_raw_code_load_file(const char *filename) { mp_reader_t reader; mp_reader_new_file(&reader, filename); return mp_raw_code_load(&reader); } +#endif // MICROPY_HAS_FILE_READER + #endif // MICROPY_PERSISTENT_CODE_LOAD #if MICROPY_PERSISTENT_CODE_SAVE @@ -260,22 +533,34 @@ STATIC void mp_print_uint(mp_print_t *print, size_t n) { print->print_strn(print->data, (char*)p, buf + sizeof(buf) - p); } -STATIC void save_qstr(mp_print_t *print, qstr qst) { +STATIC void save_qstr(mp_print_t *print, qstr_window_t *qw, qstr qst) { + if (qst <= QSTR_LAST_STATIC) { + // encode static qstr + byte buf[2] = {0, qst & 0xff}; + mp_print_bytes(print, buf, 2); + return; + } + size_t idx = qstr_window_insert(qw, qst); + if (idx < QSTR_WINDOW_SIZE) { + // qstr found in window, encode index to it + mp_print_uint(print, idx << 1 | 1); + return; + } size_t len; const byte *str = qstr_data(qst, &len); - mp_print_uint(print, len); + mp_print_uint(print, len << 1); mp_print_bytes(print, str, len); } STATIC void save_obj(mp_print_t *print, mp_obj_t o) { - if (MP_OBJ_IS_STR_OR_BYTES(o)) { + if (mp_obj_is_str_or_bytes(o)) { byte obj_type; - if (MP_OBJ_IS_STR(o)) { + if (mp_obj_is_str(o)) { obj_type = 's'; } else { obj_type = 'b'; } - mp_uint_t len; + size_t len; const char *str = mp_obj_str_get_data(o, &len); mp_print_bytes(print, &obj_type, 1); mp_print_uint(print, len); @@ -287,10 +572,10 @@ STATIC void save_obj(mp_print_t *print, mp_obj_t o) { // we save numbers using a simplistic text representation // TODO could be improved byte obj_type; - if (MP_OBJ_IS_TYPE(o, &mp_type_int)) { + if (mp_obj_is_type(o, &mp_type_int)) { obj_type = 'i'; #if MICROPY_PY_BUILTINS_COMPLEX - } else if (MP_OBJ_IS_TYPE(o, &mp_type_complex)) { + } else if (mp_obj_is_type(o, &mp_type_complex)) { obj_type = 'c'; #endif } else { @@ -308,52 +593,127 @@ STATIC void save_obj(mp_print_t *print, mp_obj_t o) { } } -STATIC void save_bytecode_qstrs(mp_print_t *print, const byte *ip, const byte *ip_top) { +STATIC void save_bytecode(mp_print_t *print, qstr_window_t *qw, const byte *ip, const byte *ip_top) { while (ip < ip_top) { size_t sz; - uint f = mp_opcode_format(ip, &sz); + uint f = mp_opcode_format(ip, &sz, true); if (f == MP_OPCODE_QSTR) { + mp_print_bytes(print, ip, 1); qstr qst = ip[1] | (ip[2] << 8); - save_qstr(print, 
qst); + save_qstr(print, qw, qst); + ip += 3; + sz -= 3; } + mp_print_bytes(print, ip, sz); ip += sz; } } -STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc) { - if (rc->kind != MP_CODE_BYTECODE) { - mp_raise_ValueError("can only save bytecode"); +STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc, qstr_window_t *qstr_window) { + // Save function kind and data length + mp_print_uint(print, (rc->fun_data_len << 2) | (rc->kind - MP_CODE_BYTECODE)); + + const byte *ip2; + bytecode_prelude_t prelude; + + if (rc->kind == MP_CODE_BYTECODE) { + // Save prelude + const byte *ip = rc->fun_data; + extract_prelude(&ip, &ip2, &prelude); + size_t prelude_len = ip - (const byte*)rc->fun_data; + const byte *ip_top = (const byte*)rc->fun_data + rc->fun_data_len; + mp_print_bytes(print, rc->fun_data, prelude_len); + + // Save bytecode + save_bytecode(print, qstr_window, ip, ip_top); + } else { + // Save native code + mp_print_bytes(print, rc->fun_data, rc->fun_data_len); + + if (rc->kind == MP_CODE_NATIVE_PY || rc->kind == MP_CODE_NATIVE_VIPER) { + // Save qstr link table for native code + mp_print_uint(print, rc->n_qstr); + for (size_t i = 0; i < rc->n_qstr; ++i) { + mp_print_uint(print, rc->qstr_link[i].off); + save_qstr(print, qstr_window, rc->qstr_link[i].qst); + } + } + + if (rc->kind == MP_CODE_NATIVE_PY) { + // Save prelude size, and extract prelude for later use + mp_print_uint(print, rc->prelude_offset); + const byte *ip = (const byte*)rc->fun_data + rc->prelude_offset; + extract_prelude(&ip, &ip2, &prelude); + } else { + // Save basic scope info for viper and asm + mp_print_uint(print, rc->scope_flags); + prelude.n_pos_args = 0; + prelude.n_kwonly_args = 0; + if (rc->kind == MP_CODE_NATIVE_ASM) { + mp_print_uint(print, rc->n_pos_args); + mp_print_uint(print, rc->type_sig); + } + } } - // save bytecode - mp_print_uint(print, rc->data.u_byte.bc_len); - mp_print_bytes(print, rc->data.u_byte.bytecode, rc->data.u_byte.bc_len); + if (rc->kind == MP_CODE_BYTECODE || rc->kind == MP_CODE_NATIVE_PY) { + // Save qstrs in prelude + save_qstr(print, qstr_window, ip2[0] | (ip2[1] << 8)); // simple_name + save_qstr(print, qstr_window, ip2[2] | (ip2[3] << 8)); // source_file + } - // extract prelude - const byte *ip = rc->data.u_byte.bytecode; + if (rc->kind != MP_CODE_NATIVE_ASM) { + // Save constant table for bytecode, native and viper + + // Number of entries in constant table + mp_print_uint(print, rc->n_obj); + mp_print_uint(print, rc->n_raw_code); + + const mp_uint_t *const_table = rc->const_table; + + // Save function argument names (initial entries in const_table) + // (viper has n_pos_args=n_kwonly_args=0 so doesn't save any qstrs here) + for (size_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { + mp_obj_t o = (mp_obj_t)*const_table++; + save_qstr(print, qstr_window, MP_OBJ_QSTR_VALUE(o)); + } + + if (rc->kind != MP_CODE_BYTECODE) { + // Skip saving mp_fun_table entry + ++const_table; + } + + // Save constant objects and raw code children + for (size_t i = 0; i < rc->n_obj; ++i) { + save_obj(print, (mp_obj_t)*const_table++); + } + for (size_t i = 0; i < rc->n_raw_code; ++i) { + save_raw_code(print, (mp_raw_code_t*)(uintptr_t)*const_table++, qstr_window); + } + } +} + +STATIC bool mp_raw_code_has_native(mp_raw_code_t *rc) { + if (rc->kind != MP_CODE_BYTECODE) { + return true; + } + + const byte *ip = rc->fun_data; const byte *ip2; bytecode_prelude_t prelude; extract_prelude(&ip, &ip2, &prelude); - // save qstrs - save_qstr(print, ip2[0] | (ip2[1] << 8)); // 
simple_name - save_qstr(print, ip2[2] | (ip2[3] << 8)); // source_file - save_bytecode_qstrs(print, ip, rc->data.u_byte.bytecode + rc->data.u_byte.bc_len); + const mp_uint_t *const_table = rc->const_table + + prelude.n_pos_args + prelude.n_kwonly_args + + rc->n_obj; - // save constant table - mp_print_uint(print, rc->data.u_byte.n_obj); - mp_print_uint(print, rc->data.u_byte.n_raw_code); - const mp_uint_t *const_table = rc->data.u_byte.const_table; - for (uint i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) { - mp_obj_t o = (mp_obj_t)*const_table++; - save_qstr(print, MP_OBJ_QSTR_VALUE(o)); - } - for (uint i = 0; i < rc->data.u_byte.n_obj; ++i) { - save_obj(print, (mp_obj_t)*const_table++); - } - for (uint i = 0; i < rc->data.u_byte.n_raw_code; ++i) { - save_raw_code(print, (mp_raw_code_t*)(uintptr_t)*const_table++); + for (size_t i = 0; i < rc->n_raw_code; ++i) { + if (mp_raw_code_has_native((mp_raw_code_t*)(uintptr_t)*const_table++)) { + return true; + } } + + return false; } void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) { @@ -362,16 +722,27 @@ void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) { // byte version // byte feature flags // byte number of bits in a small int - byte header[4] = {'M', MPY_VERSION, MPY_FEATURE_FLAGS_DYNAMIC, + // uint size of qstr window + byte header[4] = { + 'M', + MPY_VERSION, + MPY_FEATURE_ENCODE_FLAGS(MPY_FEATURE_FLAGS_DYNAMIC), #if MICROPY_DYNAMIC_COMPILER mp_dynamic_compiler.small_int_bits, #else mp_small_int_bits(), #endif }; + if (mp_raw_code_has_native(rc)) { + header[2] |= MPY_FEATURE_ENCODE_ARCH(MPY_FEATURE_ARCH_DYNAMIC); + } mp_print_bytes(print, header, sizeof(header)); + mp_print_uint(print, QSTR_WINDOW_SIZE); - save_raw_code(print, rc); + qstr_window_t qw; + qw.idx = 0; + memset(qw.window, 0, sizeof(qw.window)); + save_raw_code(print, rc, &qw); } // here we define mp_raw_code_save_file depending on the port diff --git a/python/src/py/persistentcode.h b/python/src/py/persistentcode.h index d04e0b633..2074c64fe 100644 --- a/python/src/py/persistentcode.h +++ b/python/src/py/persistentcode.h @@ -30,6 +30,22 @@ #include "py/reader.h" #include "py/emitglue.h" +// The current version of .mpy files +#define MPY_VERSION 4 + +enum { + MP_NATIVE_ARCH_NONE = 0, + MP_NATIVE_ARCH_X86, + MP_NATIVE_ARCH_X64, + MP_NATIVE_ARCH_ARMV6, + MP_NATIVE_ARCH_ARMV6M, + MP_NATIVE_ARCH_ARMV7M, + MP_NATIVE_ARCH_ARMV7EM, + MP_NATIVE_ARCH_ARMV7EMSP, + MP_NATIVE_ARCH_ARMV7EMDP, + MP_NATIVE_ARCH_XTENSA, +}; + mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader); mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len); mp_raw_code_t *mp_raw_code_load_file(const char *filename); diff --git a/python/src/py/qstr.c b/python/src/py/qstr.c index 08c3e2505..e6f86401a 100644 --- a/python/src/py/qstr.c +++ b/python/src/py/qstr.c @@ -80,6 +80,10 @@ #define QSTR_EXIT() #endif +// Initial number of entries for qstr pool, set so that the first dynamically +// allocated pool is twice this size. The value here must be <= MP_QSTRnumber_of. 
+#define MICROPY_ALLOC_QSTR_ENTRIES_INIT (10) + // this must match the equivalent function in makeqstrdata.py mp_uint_t qstr_compute_hash(const byte *data, size_t len) { // djb2 algorithm; see http://www.cse.yorku.ca/~oz/hash.html @@ -98,7 +102,7 @@ mp_uint_t qstr_compute_hash(const byte *data, size_t len) { const qstr_pool_t mp_qstr_const_pool = { NULL, // no previous pool 0, // no previous pool - 10, // set so that the first dynamically allocated pool is twice this size; must be <= the len (just below) + MICROPY_ALLOC_QSTR_ENTRIES_INIT, MP_QSTRnumber_of, // corresponds to number of strings in array just below { #ifndef NO_QSTR @@ -141,14 +145,19 @@ STATIC qstr qstr_add(const byte *q_ptr) { // make sure we have room in the pool for a new qstr if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) { - qstr_pool_t *pool = m_new_obj_var_maybe(qstr_pool_t, const char*, MP_STATE_VM(last_pool)->alloc * 2); + size_t new_alloc = MP_STATE_VM(last_pool)->alloc * 2; + #ifdef MICROPY_QSTR_EXTRA_POOL + // Put a lower bound on the allocation size in case the extra qstr pool has few entries + new_alloc = MAX(MICROPY_ALLOC_QSTR_ENTRIES_INIT, new_alloc); + #endif + qstr_pool_t *pool = m_new_obj_var_maybe(qstr_pool_t, const char*, new_alloc); if (pool == NULL) { QSTR_EXIT(); - m_malloc_fail(MP_STATE_VM(last_pool)->alloc * 2); + m_malloc_fail(new_alloc); } pool->prev = MP_STATE_VM(last_pool); pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len; - pool->alloc = MP_STATE_VM(last_pool)->alloc * 2; + pool->alloc = new_alloc; pool->len = 0; MP_STATE_VM(last_pool) = pool; DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc); @@ -242,7 +251,8 @@ qstr qstr_from_strn(const char *str, size_t len) { } mp_uint_t qstr_hash(qstr q) { - return Q_GET_HASH(find_qstr(q)); + const byte *qd = find_qstr(q); + return Q_GET_HASH(qd); } size_t qstr_len(qstr q) { diff --git a/python/src/py/qstrdefs.h b/python/src/py/qstrdefs.h index a60905812..5c8b13b47 100644 --- a/python/src/py/qstrdefs.h +++ b/python/src/py/qstrdefs.h @@ -37,8 +37,13 @@ Q() Q(*) Q(_) Q(/) +#if MICROPY_PY_BUILTINS_STR_OP_MODULO Q(%#o) Q(%#x) +#else +Q({:#o}) +Q({:#x}) +#endif Q({:#b}) Q( ) Q(\n) diff --git a/python/src/py/reader.c b/python/src/py/reader.c index c4d18d53d..80364104b 100644 --- a/python/src/py/reader.c +++ b/python/src/py/reader.c @@ -124,6 +124,8 @@ void mp_reader_new_file_from_fd(mp_reader_t *reader, int fd, bool close_fd) { reader->close = mp_reader_posix_close; } +#if !MICROPY_VFS_POSIX +// If MICROPY_VFS_POSIX is defined then this function is provided by the VFS layer void mp_reader_new_file(mp_reader_t *reader, const char *filename) { int fd = open(filename, O_RDONLY, 0644); if (fd < 0) { @@ -131,5 +133,6 @@ void mp_reader_new_file(mp_reader_t *reader, const char *filename) { } mp_reader_new_file_from_fd(reader, fd, true); } +#endif #endif diff --git a/python/src/py/repl.c b/python/src/py/repl.c index 5dce8bbb7..da0fefb3a 100644 --- a/python/src/py/repl.c +++ b/python/src/py/repl.c @@ -106,8 +106,13 @@ bool mp_repl_continue_with_input(const char *input) { } } - // continue if unmatched brackets or quotes - if (n_paren > 0 || n_brack > 0 || n_brace > 0 || in_quote == Q_3_SINGLE || in_quote == Q_3_DOUBLE) { + // continue if unmatched 3-quotes + if (in_quote == Q_3_SINGLE || in_quote == Q_3_DOUBLE) { + return true; + } + + // continue if unmatched brackets, but only if not in a 1-quote + if ((n_paren > 0 || n_brack > 0 || n_brace > 0) && in_quote == Q_NONE) { 
return true; } diff --git a/python/src/py/runtime.c b/python/src/py/runtime.c index 8f1063076..e50256605 100644 --- a/python/src/py/runtime.c +++ b/python/src/py/runtime.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014-2018 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -62,7 +63,8 @@ void mp_init(void) { MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL; #if MICROPY_ENABLE_SCHEDULER MP_STATE_VM(sched_state) = MP_SCHED_IDLE; - MP_STATE_VM(sched_sp) = 0; + MP_STATE_VM(sched_idx) = 0; + MP_STATE_VM(sched_len) = 0; #endif #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF @@ -108,12 +110,6 @@ void mp_init(void) { for (size_t i = 0; i < MICROPY_PY_OS_DUPTERM; ++i) { MP_STATE_VM(dupterm_objs[i]) = MP_OBJ_NULL; } - MP_STATE_VM(dupterm_arr_obj) = MP_OBJ_NULL; - #endif - - #if MICROPY_FSUSERMOUNT - // zero out the pointers to the user-mounted devices - memset(MP_STATE_VM(fs_user_mount), 0, sizeof(MP_STATE_VM(fs_user_mount))); #endif #if MICROPY_VFS @@ -130,11 +126,13 @@ void mp_init(void) { } void mp_deinit(void) { + MP_THREAD_GIL_EXIT(); + //mp_obj_dict_free(&dict_main); //mp_map_deinit(&MP_STATE_VM(mp_loaded_modules_map)); // call port specific deinitialization if any -#ifdef MICROPY_PORT_INIT_FUNC +#ifdef MICROPY_PORT_DEINIT_FUNC MICROPY_PORT_DEINIT_FUNC; #endif } @@ -172,7 +170,7 @@ mp_obj_t mp_load_global(qstr qst) { mp_raise_msg(&mp_type_NameError, "name not defined"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_NameError, - "name '%q' is not defined", qst)); + "name '%q' isn't defined", qst)); } } } @@ -221,7 +219,7 @@ mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) { if (op == MP_UNARY_OP_NOT) { // "not x" is the negative of whether "x" is true per Python semantics return mp_obj_new_bool(mp_obj_is_true(arg) == 0); - } else if (MP_OBJ_IS_SMALL_INT(arg)) { + } else if (mp_obj_is_small_int(arg)) { mp_int_t val = MP_OBJ_SMALL_INT_VALUE(arg); switch (op) { case MP_UNARY_OP_BOOL: @@ -229,6 +227,7 @@ mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) { case MP_UNARY_OP_HASH: return arg; case MP_UNARY_OP_POSITIVE: + case MP_UNARY_OP_INT: return arg; case MP_UNARY_OP_NEGATIVE: // check for overflow @@ -250,7 +249,7 @@ mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) { assert(op == MP_UNARY_OP_INVERT); return MP_OBJ_NEW_SMALL_INT(~val); } - } else if (op == MP_UNARY_OP_HASH && MP_OBJ_IS_STR_OR_BYTES(arg)) { + } else if (op == MP_UNARY_OP_HASH && mp_obj_is_str_or_bytes(arg)) { // fast path for hashing str/bytes GET_STR_HASH(arg, h); if (h == 0) { @@ -266,12 +265,23 @@ mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) { return result; } } + // With MP_UNARY_OP_INT, mp_unary_op() becomes a fallback for mp_obj_get_int(). + // In this case provide a more focused error message to not confuse, e.g. 
chr(1.0) if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) { - mp_raise_TypeError("unsupported type for operator"); + if (op == MP_UNARY_OP_INT) { + mp_raise_TypeError("can't convert to int"); + } else { + mp_raise_TypeError("unsupported type for operator"); + } } else { - nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "unsupported type for %q: '%s'", - mp_unary_op_method_name[op], mp_obj_get_type_str(arg))); + if (op == MP_UNARY_OP_INT) { + nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, + "can't convert %s to int", mp_obj_get_type_str(arg))); + } else { + nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, + "unsupported type for %q: '%s'", + mp_unary_op_method_name[op], mp_obj_get_type_str(arg))); + } } } } @@ -319,7 +329,7 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { } else { return mp_const_false; } - } else if (MP_OBJ_IS_TYPE(rhs, &mp_type_tuple)) { + } else if (mp_obj_is_type(rhs, &mp_type_tuple)) { mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(rhs); for (size_t i = 0; i < tuple->len; i++) { rhs = tuple->items[i]; @@ -335,9 +345,9 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { goto unsupported_op; } - if (MP_OBJ_IS_SMALL_INT(lhs)) { + if (mp_obj_is_small_int(lhs)) { mp_int_t lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs); - if (MP_OBJ_IS_SMALL_INT(rhs)) { + if (mp_obj_is_small_int(rhs)) { mp_int_t rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs); // This is a binary operation: lhs_val op rhs_val // We need to be careful to handle overflow; see CERT INT32-C @@ -446,8 +456,7 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { case MP_BINARY_OP_INPLACE_POWER: if (rhs_val < 0) { #if MICROPY_PY_BUILTINS_FLOAT - lhs = mp_obj_new_float(lhs_val); - goto generic_binary_op; + return mp_obj_float_binary_op(op, lhs_val, rhs); #else mp_raise_ValueError("negative power with no float support"); #endif @@ -497,11 +506,11 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { default: goto unsupported_op; } - // TODO: We just should make mp_obj_new_int() inline and use that + // This is an inlined version of mp_obj_new_int, for speed if (MP_SMALL_INT_FITS(lhs_val)) { return MP_OBJ_NEW_SMALL_INT(lhs_val); } else { - return mp_obj_new_int(lhs_val); + return mp_obj_new_int_from_ll(lhs_val); } #if MICROPY_PY_BUILTINS_FLOAT } else if (mp_obj_is_float(rhs)) { @@ -512,7 +521,7 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { return res; } #if MICROPY_PY_BUILTINS_COMPLEX - } else if (MP_OBJ_IS_TYPE(rhs, &mp_type_complex)) { + } else if (mp_obj_is_type(rhs, &mp_type_complex)) { mp_obj_t res = mp_obj_complex_binary_op(op, lhs_val, 0, rhs); if (res == MP_OBJ_NULL) { goto unsupported_op; @@ -582,7 +591,7 @@ unsupported_op: } zero_division: - mp_raise_msg(&mp_type_ZeroDivisionError, "division by zero"); + mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero"); } mp_obj_t mp_call_function_0(mp_obj_t fun) { @@ -619,7 +628,7 @@ mp_obj_t mp_call_function_n_kw(mp_obj_t fun_in, size_t n_args, size_t n_kw, cons mp_raise_TypeError("object not callable"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not callable", mp_obj_get_type_str(fun_in))); + "'%s' object isn't callable", mp_obj_get_type_str(fun_in))); } } @@ -659,7 +668,7 @@ void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_ // Try to get a hint for the size of the kw_dict uint kw_dict_len = 0; - if (kw_dict != MP_OBJ_NULL && MP_OBJ_IS_TYPE(kw_dict, 
&mp_type_dict)) { + if (kw_dict != MP_OBJ_NULL && mp_obj_is_type(kw_dict, &mp_type_dict)) { kw_dict_len = mp_obj_dict_len(kw_dict); } @@ -681,7 +690,7 @@ void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_ mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t); args2_len += n_args; - } else if (MP_OBJ_IS_TYPE(pos_seq, &mp_type_tuple) || MP_OBJ_IS_TYPE(pos_seq, &mp_type_list)) { + } else if (mp_obj_is_type(pos_seq, &mp_type_tuple) || mp_obj_is_type(pos_seq, &mp_type_list)) { // optimise the case of a tuple and list // get the items @@ -742,15 +751,15 @@ void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_ // Note that it can be arbitrary iterator. if (kw_dict == MP_OBJ_NULL) { // pass - } else if (MP_OBJ_IS_TYPE(kw_dict, &mp_type_dict)) { + } else if (mp_obj_is_type(kw_dict, &mp_type_dict)) { // dictionary mp_map_t *map = mp_obj_dict_get_map(kw_dict); assert(args2_len + 2 * map->used <= args2_alloc); // should have enough, since kw_dict_len is in this case hinted correctly above for (size_t i = 0; i < map->alloc; i++) { - if (MP_MAP_SLOT_IS_FILLED(map, i)) { + if (mp_map_slot_is_filled(map, i)) { // the key must be a qstr, so intern it if it's a string mp_obj_t key = map->table[i].key; - if (!MP_OBJ_IS_QSTR(key)) { + if (!mp_obj_is_qstr(key)) { key = mp_obj_str_intern_checked(key); } args2[args2_len++] = key; @@ -780,7 +789,7 @@ void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_ } // the key must be a qstr, so intern it if it's a string - if (!MP_OBJ_IS_QSTR(key)) { + if (!mp_obj_is_qstr(key)) { key = mp_obj_str_intern_checked(key); } @@ -815,7 +824,7 @@ mp_obj_t mp_call_method_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_ob // unpacked items are stored in reverse order into the array pointed to by items void mp_unpack_sequence(mp_obj_t seq_in, size_t num, mp_obj_t *items) { size_t seq_len; - if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(seq_in, &mp_type_list)) { + if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) { mp_obj_t *seq_items; mp_obj_get_array(seq_in, &seq_len, &seq_items); if (seq_len < num) { @@ -865,9 +874,13 @@ void mp_unpack_ex(mp_obj_t seq_in, size_t num_in, mp_obj_t *items) { size_t num_right = (num_in >> 8) & 0xff; DEBUG_OP_printf("unpack ex " UINT_FMT " " UINT_FMT "\n", num_left, num_right); size_t seq_len; - if (MP_OBJ_IS_TYPE(seq_in, &mp_type_tuple) || MP_OBJ_IS_TYPE(seq_in, &mp_type_list)) { + if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) { + // Make the seq variable volatile so the compiler keeps a reference to it, + // since if it's a tuple then seq_items points to the interior of the GC cell + // and mp_obj_new_list may trigger a GC which doesn't trace this and reclaims seq. + volatile mp_obj_t seq = seq_in; mp_obj_t *seq_items; - mp_obj_get_array(seq_in, &seq_len, &seq_items); + mp_obj_get_array(seq, &seq_len, &seq_items); if (seq_len < num_left + num_right) { goto too_short; } @@ -878,6 +891,7 @@ void mp_unpack_ex(mp_obj_t seq_in, size_t num_in, mp_obj_t *items) { for (size_t i = 0; i < num_left; i++) { items[num_right + 1 + i] = seq_items[num_left - 1 - i]; } + seq = MP_OBJ_NULL; } else { // Generic iterable; this gets a bit messy: we unpack known left length to the // items destination array, then the rest to a dynamically created list. 
Once the @@ -979,10 +993,10 @@ STATIC mp_obj_t mp_obj_new_checked_fun(const mp_obj_type_t *type, mp_obj_t fun) // Conversion means dealing with static/class methods, callables, and values. // see http://docs.python.org/3/howto/descriptor.html void mp_convert_member_lookup(mp_obj_t self, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest) { - if (MP_OBJ_IS_TYPE(member, &mp_type_staticmethod)) { + if (mp_obj_is_type(member, &mp_type_staticmethod)) { // return just the function dest[0] = ((mp_obj_static_class_method_t*)MP_OBJ_TO_PTR(member))->fun; - } else if (MP_OBJ_IS_TYPE(member, &mp_type_classmethod)) { + } else if (mp_obj_is_type(member, &mp_type_classmethod)) { // return a bound method, with self being the type of this object // this type should be the type of the original instance, not the base // type (which is what is passed in the 'type' argument to this function) @@ -991,11 +1005,11 @@ void mp_convert_member_lookup(mp_obj_t self, const mp_obj_type_t *type, mp_obj_t } dest[0] = ((mp_obj_static_class_method_t*)MP_OBJ_TO_PTR(member))->fun; dest[1] = MP_OBJ_FROM_PTR(type); - } else if (MP_OBJ_IS_TYPE(member, &mp_type_type)) { + } else if (mp_obj_is_type(member, &mp_type_type)) { // Don't try to bind types (even though they're callable) dest[0] = member; - } else if (MP_OBJ_IS_FUN(member) - || (MP_OBJ_IS_OBJ(member) + } else if (mp_obj_is_fun(member) + || (mp_obj_is_obj(member) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(member))->type->name == MP_QSTR_closure || ((mp_obj_base_t*)MP_OBJ_TO_PTR(member))->type->name == MP_QSTR_generator))) { // only functions, closures and generators objects can be bound to self @@ -1035,14 +1049,13 @@ void mp_load_method_maybe(mp_obj_t obj, qstr attr, mp_obj_t *dest) { mp_obj_type_t *type = mp_obj_get_type(obj); // look for built-in names - if (0) { #if MICROPY_CPYTHON_COMPAT - } else if (attr == MP_QSTR___class__) { + if (attr == MP_QSTR___class__) { // a.__class__ is equivalent to type(a) dest[0] = MP_OBJ_FROM_PTR(type); + } else #endif - - } else if (attr == MP_QSTR___next__ && type->iternext != NULL) { + if (attr == MP_QSTR___next__ && type->iternext != NULL) { dest[0] = MP_OBJ_FROM_PTR(&mp_builtin_next_obj); dest[1] = obj; @@ -1073,7 +1086,7 @@ void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest) { mp_raise_msg(&mp_type_AttributeError, "no such attribute"); } else { // following CPython, we give a more detailed error message for type objects - if (MP_OBJ_IS_TYPE(base, &mp_type_type)) { + if (mp_obj_is_type(base, &mp_type_type)) { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_AttributeError, "type object '%q' has no attribute '%q'", ((mp_obj_type_t*)MP_OBJ_TO_PTR(base))->name, attr)); @@ -1158,7 +1171,7 @@ mp_obj_t mp_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) { mp_raise_TypeError("object not iterable"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not iterable", mp_obj_get_type_str(o_in))); + "'%s' object isn't iterable", mp_obj_get_type_str(o_in))); } } @@ -1180,7 +1193,7 @@ mp_obj_t mp_iternext_allow_raise(mp_obj_t o_in) { mp_raise_TypeError("object not an iterator"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not an iterator", mp_obj_get_type_str(o_in))); + "'%s' object isn't an iterator", mp_obj_get_type_str(o_in))); } } } @@ -1216,7 +1229,7 @@ mp_obj_t mp_iternext(mp_obj_t o_in) { mp_raise_TypeError("object not an iterator"); } else { nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError, - "'%s' object is not an iterator", 
mp_obj_get_type_str(o_in)));
+ "'%s' object isn't an iterator", mp_obj_get_type_str(o_in)));
}
}
}
@@ -1249,15 +1262,8 @@ mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t th
if (send_value == mp_const_none) {
mp_load_method_maybe(self_in, MP_QSTR___next__, dest);
if (dest[0] != MP_OBJ_NULL) {
- nlr_buf_t nlr;
- if (nlr_push(&nlr) == 0) {
- *ret_val = mp_call_method_n_kw(0, 0, dest);
- nlr_pop();
- return MP_VM_RETURN_YIELD;
- } else {
- *ret_val = MP_OBJ_FROM_PTR(nlr.ret_val);
- return MP_VM_RETURN_EXCEPTION;
- }
+ *ret_val = mp_call_method_n_kw(0, 0, dest);
+ return MP_VM_RETURN_YIELD;
}
}
@@ -1266,10 +1272,6 @@ mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t th
if (send_value != MP_OBJ_NULL) {
mp_load_method(self_in, MP_QSTR_send, dest);
dest[2] = send_value;
- // TODO: This should have exception wrapping like __next__ case
- // above. Not done right away to think how to optimize native
- // generators better, see:
- // https://github.com/micropython/micropython/issues/2628
*ret_val = mp_call_method_n_kw(1, 0, dest);
return MP_VM_RETURN_YIELD;
}
@@ -1331,7 +1333,7 @@ mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level) {
args[1] = mp_const_none; // TODO should be globals
args[2] = mp_const_none; // TODO should be locals
args[3] = fromlist;
- args[4] = level; // must be 0; we don't yet support other values
+ args[4] = level;
// TODO lookup __import__ and call that instead of going straight to builtin implementation
return mp_builtin___import__(5, args);
@@ -1373,15 +1375,8 @@ import_error:
qstr dot_name_q = qstr_from_strn(dot_name, dot_name_len);
mp_local_free(dot_name);
- mp_obj_t args[5];
- args[0] = MP_OBJ_NEW_QSTR(dot_name_q);
- args[1] = mp_const_none; // TODO should be globals
- args[2] = mp_const_none; // TODO should be locals
- args[3] = mp_const_true; // Pass sentinel "non empty" value to force returning of leaf module
- args[4] = MP_OBJ_NEW_SMALL_INT(0);
-
- // TODO lookup __import__ and call that instead of going straight to builtin implementation
- return mp_builtin___import__(5, args);
+ // For fromlist, pass sentinel "non empty" value to force returning of leaf module
+ return mp_import_name(dot_name_q, mp_const_true, MP_OBJ_NEW_SMALL_INT(0));
#else
@@ -1395,12 +1390,16 @@ void mp_import_all(mp_obj_t module) {
DEBUG_printf("import all %p\n", module);
// TODO: Support __all__
- mp_map_t *map = mp_obj_dict_get_map(MP_OBJ_FROM_PTR(mp_obj_module_get_globals(module)));
+ mp_map_t *map = &mp_obj_module_get_globals(module)->map;
for (size_t i = 0; i < map->alloc; i++) {
- if (MP_MAP_SLOT_IS_FILLED(map, i)) {
- qstr name = MP_OBJ_QSTR_VALUE(map->table[i].key);
- if (*qstr_str(name) != '_') {
- mp_store_name(name, map->table[i].value);
+ if (mp_map_slot_is_filled(map, i)) {
+ // Entries in a module's global scope may be generated programmatically,
+ // so for longer names they may not be qstrs. Avoid interning such a name
+ // as a qstr when it starts with '_'; it is kept as a plain string exactly
+ // to save memory.
+ const char *name = mp_obj_str_get_str(map->table[i].key); + if (*name != '_') { + qstr qname = mp_obj_str_get_qstr(map->table[i].key); + mp_store_name(qname, map->table[i].value); } } } diff --git a/python/src/py/runtime.h b/python/src/py/runtime.h index ad65f3f46..0eb15d461 100644 --- a/python/src/py/runtime.h +++ b/python/src/py/runtime.h @@ -70,14 +70,17 @@ void mp_handle_pending_tail(mp_uint_t atomic_state); #if MICROPY_ENABLE_SCHEDULER void mp_sched_lock(void); void mp_sched_unlock(void); -static inline unsigned int mp_sched_num_pending(void) { return MP_STATE_VM(sched_sp); } +static inline unsigned int mp_sched_num_pending(void) { return MP_STATE_VM(sched_len); } bool mp_sched_schedule(mp_obj_t function, mp_obj_t arg); #endif // extra printing method specifically for mp_obj_t's which are integral type int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec); -void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw); +void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig); +static inline void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) { + mp_arg_check_num_sig(n_args, n_kw, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, takes_kw)); +} void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals); void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals); NORETURN void mp_arg_error_terse_mismatch(void); @@ -166,8 +169,10 @@ NORETURN void mp_raise_recursion_depth(void); #endif // helper functions for native/viper code -mp_uint_t mp_convert_obj_to_native(mp_obj_t obj, mp_uint_t type); -mp_obj_t mp_convert_native_to_obj(mp_uint_t val, mp_uint_t type); +int mp_native_type_from_qstr(qstr qst); +mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type); +mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type); +mp_obj_dict_t *mp_native_swap_globals(mp_obj_dict_t *new_globals); mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, size_t n_args_kw, const mp_obj_t *args); void mp_native_raise(mp_obj_t o); @@ -175,7 +180,9 @@ void mp_native_raise(mp_obj_t o); #define mp_sys_argv (MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_sys_argv_obj))) #if MICROPY_WARNINGS -void mp_warning(const char *msg, ...); +#ifndef mp_warning +void mp_warning(const char *category, const char *msg, ...); +#endif #else #define mp_warning(...) 
#endif diff --git a/python/src/py/runtime0.h b/python/src/py/runtime0.h index 960532d17..efd439196 100644 --- a/python/src/py/runtime0.h +++ b/python/src/py/runtime0.h @@ -26,11 +26,15 @@ #ifndef MICROPY_INCLUDED_PY_RUNTIME0_H #define MICROPY_INCLUDED_PY_RUNTIME0_H -// These must fit in 8 bits; see scope.h +// The first four must fit in 8 bits, see emitbc.c +// The remaining must fit in 16 bits, see scope.h #define MP_SCOPE_FLAG_VARARGS (0x01) #define MP_SCOPE_FLAG_VARKEYWORDS (0x02) #define MP_SCOPE_FLAG_GENERATOR (0x04) #define MP_SCOPE_FLAG_DEFKWARGS (0x08) +#define MP_SCOPE_FLAG_REFGLOBALS (0x10) // used only if native emitter enabled +#define MP_SCOPE_FLAG_HASCONSTS (0x20) // used only if native emitter enabled +#define MP_SCOPE_FLAG_VIPERRET_POS (6) // 3 bits used for viper return type // types for native (viper) function signature #define MP_NATIVE_TYPE_OBJ (0x00) @@ -57,6 +61,7 @@ typedef enum { MP_UNARY_OP_LEN, // __len__ MP_UNARY_OP_HASH, // __hash__; must return a small int MP_UNARY_OP_ABS, // __abs__ + MP_UNARY_OP_INT, // __int__ MP_UNARY_OP_SIZEOF, // for sys.getsizeof() MP_UNARY_OP_NUM_RUNTIME, @@ -143,8 +148,12 @@ typedef enum { } mp_binary_op_t; typedef enum { - MP_F_CONVERT_OBJ_TO_NATIVE = 0, + MP_F_CONST_NONE_OBJ = 0, + MP_F_CONST_FALSE_OBJ, + MP_F_CONST_TRUE_OBJ, + MP_F_CONVERT_OBJ_TO_NATIVE, MP_F_CONVERT_NATIVE_TO_OBJ, + MP_F_NATIVE_SWAP_GLOBALS, MP_F_LOAD_NAME, MP_F_LOAD_GLOBAL, MP_F_LOAD_BUILD_CLASS, @@ -164,8 +173,8 @@ typedef enum { MP_F_BUILD_MAP, MP_F_STORE_MAP, #if MICROPY_PY_BUILTINS_SET - MP_F_BUILD_SET, MP_F_STORE_SET, + MP_F_BUILD_SET, #endif MP_F_MAKE_FUNCTION_FROM_RAW_CODE, MP_F_NATIVE_CALL_FUNCTION_N_KW, @@ -188,12 +197,14 @@ typedef enum { MP_F_DELETE_GLOBAL, MP_F_NEW_CELL, MP_F_MAKE_CLOSURE_FROM_RAW_CODE, + MP_F_ARG_CHECK_NUM_SIG, MP_F_SETUP_CODE_STATE, MP_F_SMALL_INT_FLOOR_DIVIDE, MP_F_SMALL_INT_MODULO, + MP_F_NATIVE_YIELD_FROM, MP_F_NUMBER_OF, } mp_fun_kind_t; -extern void *const mp_fun_table[MP_F_NUMBER_OF]; +extern const void *const mp_fun_table[MP_F_NUMBER_OF]; #endif // MICROPY_INCLUDED_PY_RUNTIME0_H diff --git a/python/src/py/scheduler.c b/python/src/py/scheduler.c index 30851a4d2..5edff45b6 100644 --- a/python/src/py/scheduler.c +++ b/python/src/py/scheduler.c @@ -30,6 +30,19 @@ #if MICROPY_ENABLE_SCHEDULER +#define IDX_MASK(i) ((i) & (MICROPY_SCHEDULER_DEPTH - 1)) + +static inline bool mp_sched_full(void) { + MP_STATIC_ASSERT(MICROPY_SCHEDULER_DEPTH <= 255); // MICROPY_SCHEDULER_DEPTH must fit in 8 bits + MP_STATIC_ASSERT((IDX_MASK(MICROPY_SCHEDULER_DEPTH) == 0)); // MICROPY_SCHEDULER_DEPTH must be a power of 2 + + return mp_sched_num_pending() == MICROPY_SCHEDULER_DEPTH; +} + +static inline bool mp_sched_empty(void) { + return mp_sched_num_pending() == 0; +} + // A variant of this is inlined in the VM at the pending exception check void mp_handle_pending(void) { if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) { @@ -51,8 +64,10 @@ void mp_handle_pending(void) { // or by the VM's inlined version of that function. 
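The scheduler changes in this file replace the old stack discipline (sched_sp) with a fixed-capacity FIFO ring: sched_idx marks the oldest pending item, sched_len counts how many are queued, and IDX_MASK wraps indices, which is only valid when MICROPY_SCHEDULER_DEPTH is a power of two (hence the static asserts above). A self-contained sketch of the same scheme, with illustrative names rather than MicroPython's:

#include <stdbool.h>
#include <stdint.h>

#define RING_DEPTH 8 // must be a power of 2 so masking wraps correctly
#define RING_MASK(i) ((i) & (RING_DEPTH - 1))

typedef struct { uint8_t idx, len; int items[RING_DEPTH]; } ring_t;

// usage: ring_t r = {0}; ring_push(&r, 42); int v; ring_pop(&r, &v);
static bool ring_push(ring_t *r, int v) {
    if (r->len == RING_DEPTH) {
        return false; // full: decided by len, not by comparing two indices
    }
    r->items[RING_MASK(r->idx + r->len++)] = v; // append after the last queued item
    return true;
}

static bool ring_pop(ring_t *r, int *v) {
    if (r->len == 0) {
        return false; // empty
    }
    *v = r->items[r->idx];          // oldest item first (FIFO)
    r->idx = RING_MASK(r->idx + 1); // advance the head with wrap-around
    r->len--;
    return true;
}

Keeping an explicit length instead of a second index lets the queue distinguish "full" from "empty" without wasting a slot, and it is the same arithmetic mp_sched_schedule uses to compute iput below.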
void mp_handle_pending_tail(mp_uint_t atomic_state) { MP_STATE_VM(sched_state) = MP_SCHED_LOCKED; - if (MP_STATE_VM(sched_sp) > 0) { - mp_sched_item_t item = MP_STATE_VM(sched_stack)[--MP_STATE_VM(sched_sp)]; + if (!mp_sched_empty()) { + mp_sched_item_t item = MP_STATE_VM(sched_stack)[MP_STATE_VM(sched_idx)]; + MP_STATE_VM(sched_idx) = IDX_MASK(MP_STATE_VM(sched_idx) + 1); + --MP_STATE_VM(sched_len); MICROPY_END_ATOMIC_SECTION(atomic_state); mp_call_function_1_protected(item.func, item.arg); } else { @@ -87,13 +102,13 @@ void mp_sched_unlock(void) { bool mp_sched_schedule(mp_obj_t function, mp_obj_t arg) { mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION(); bool ret; - if (MP_STATE_VM(sched_sp) < MICROPY_SCHEDULER_DEPTH) { + if (!mp_sched_full()) { if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) { MP_STATE_VM(sched_state) = MP_SCHED_PENDING; } - MP_STATE_VM(sched_stack)[MP_STATE_VM(sched_sp)].func = function; - MP_STATE_VM(sched_stack)[MP_STATE_VM(sched_sp)].arg = arg; - ++MP_STATE_VM(sched_sp); + uint8_t iput = IDX_MASK(MP_STATE_VM(sched_idx) + MP_STATE_VM(sched_len)++); + MP_STATE_VM(sched_stack)[iput].func = function; + MP_STATE_VM(sched_stack)[iput].arg = arg; ret = true; } else { // schedule stack is full diff --git a/python/src/py/scope.c b/python/src/py/scope.c index 1a6ae7b8a..d996731f8 100644 --- a/python/src/py/scope.c +++ b/python/src/py/scope.c @@ -64,10 +64,9 @@ void scope_free(scope_t *scope) { m_del(scope_t, scope, 1); } -id_info_t *scope_find_or_add_id(scope_t *scope, qstr qst, bool *added) { +id_info_t *scope_find_or_add_id(scope_t *scope, qstr qst, scope_kind_t kind) { id_info_t *id_info = scope_find(scope, qst); if (id_info != NULL) { - *added = false; return id_info; } @@ -82,11 +81,10 @@ id_info_t *scope_find_or_add_id(scope_t *scope, qstr qst, bool *added) { // handled by the compiler because it adds arguments before compiling the body id_info = &scope->id_info[scope->id_info_len++]; - id_info->kind = 0; + id_info->kind = kind; id_info->flags = 0; id_info->local_num = 0; id_info->qst = qst; - *added = true; return id_info; } @@ -110,9 +108,8 @@ STATIC void scope_close_over_in_parents(scope_t *scope, qstr qst) { assert(scope->parent != NULL); // we should have at least 1 parent for (scope_t *s = scope->parent;; s = s->parent) { assert(s->parent != NULL); // we should not get to the outer scope - bool added; - id_info_t *id = scope_find_or_add_id(s, qst, &added); - if (added) { + id_info_t *id = scope_find_or_add_id(s, qst, ID_INFO_KIND_UNDECIDED); + if (id->kind == ID_INFO_KIND_UNDECIDED) { // variable not previously declared in this scope, so declare it as free and keep searching parents id->kind = ID_INFO_KIND_FREE; } else { @@ -130,21 +127,19 @@ STATIC void scope_close_over_in_parents(scope_t *scope, qstr qst) { } } -void scope_find_local_and_close_over(scope_t *scope, id_info_t *id, qstr qst) { +void scope_check_to_close_over(scope_t *scope, id_info_t *id) { if (scope->parent != NULL) { for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) { - id_info_t *id2 = scope_find(s, qst); + id_info_t *id2 = scope_find(s, id->qst); if (id2 != NULL) { if (id2->kind == ID_INFO_KIND_LOCAL || id2->kind == ID_INFO_KIND_CELL || id2->kind == ID_INFO_KIND_FREE) { id->kind = ID_INFO_KIND_FREE; - scope_close_over_in_parents(scope, qst); - return; + scope_close_over_in_parents(scope, id->qst); } break; } } } - id->kind = ID_INFO_KIND_GLOBAL_IMPLICIT; } #endif // MICROPY_ENABLE_COMPILER diff --git a/python/src/py/scope.h b/python/src/py/scope.h index 
e3b6a57c7..ba07c3949 100644 --- a/python/src/py/scope.h +++ b/python/src/py/scope.h @@ -30,6 +30,7 @@ #include "py/emitglue.h" enum { + ID_INFO_KIND_UNDECIDED, ID_INFO_KIND_GLOBAL_IMPLICIT, ID_INFO_KIND_GLOBAL_EXPLICIT, ID_INFO_KIND_LOCAL, // in a function f, written and only referenced by f @@ -41,6 +42,7 @@ enum { ID_FLAG_IS_PARAM = 0x01, ID_FLAG_IS_STAR_PARAM = 0x02, ID_FLAG_IS_DBL_STAR_PARAM = 0x04, + ID_FLAG_VIPER_TYPE_POS = 4, }; typedef struct _id_info_t { @@ -71,11 +73,11 @@ typedef struct _scope_t { struct _scope_t *parent; struct _scope_t *next; mp_parse_node_t pn; + mp_raw_code_t *raw_code; uint16_t source_file; // a qstr uint16_t simple_name; // a qstr - mp_raw_code_t *raw_code; - uint8_t scope_flags; // see runtime0.h - uint8_t emit_options; // see compile.h + uint16_t scope_flags; // see runtime0.h + uint16_t emit_options; // see emitglue.h uint16_t num_pos_args; uint16_t num_kwonly_args; uint16_t num_def_pos_args; @@ -89,9 +91,9 @@ typedef struct _scope_t { scope_t *scope_new(scope_kind_t kind, mp_parse_node_t pn, qstr source_file, mp_uint_t emit_options); void scope_free(scope_t *scope); -id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, bool *added); +id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, scope_kind_t kind); id_info_t *scope_find(scope_t *scope, qstr qstr); id_info_t *scope_find_global(scope_t *scope, qstr qstr); -void scope_find_local_and_close_over(scope_t *scope, id_info_t *id, qstr qst); +void scope_check_to_close_over(scope_t *scope, id_info_t *id); #endif // MICROPY_INCLUDED_PY_SCOPE_H diff --git a/python/src/py/showbc.c b/python/src/py/showbc.c index 3deb18cd3..b9024b716 100644 --- a/python/src/py/showbc.c +++ b/python/src/py/showbc.c @@ -401,14 +401,9 @@ const byte *mp_bytecode_print_str(const byte *ip) { printf("FOR_ITER " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start)); break; - case MP_BC_POP_BLOCK: - // pops block and restores the stack - printf("POP_BLOCK"); - break; - - case MP_BC_POP_EXCEPT: - // pops block, checks it's an exception block, and restores the stack, saving the 3 exception values to local threadstate - printf("POP_EXCEPT"); + case MP_BC_POP_EXCEPT_JUMP: + DECODE_ULABEL; // these labels are always forward + printf("POP_EXCEPT_JUMP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start)); break; case MP_BC_BUILD_TUPLE: diff --git a/python/src/py/stream.c b/python/src/py/stream.c index 393988d2c..762cd0fc0 100644 --- a/python/src/py/stream.c +++ b/python/src/py/stream.c @@ -3,8 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2014 Damien P. George + * Copyright (c) 2014-2016 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -47,10 +47,9 @@ STATIC mp_obj_t stream_readall(mp_obj_t self_in); // be equal to input size). 
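mp_stream_rw below folds reading and writing into one short-transfer retry loop by selecting the protocol's read or write member into a single io_func_t pointer up front. The same idiom in a standalone sketch (types and names here are illustrative, not MicroPython's):

#include <stdbool.h>
#include <stddef.h>

typedef size_t (*io_fn_t)(void *obj, void *buf, size_t size, int *err);

typedef struct { io_fn_t read; io_fn_t write; } proto_t;

// Transfer up to `size` bytes, retrying on short reads/writes; a short
// return count means EOF (or an error reported through *err).
static size_t rw_all(const proto_t *p, void *obj, void *buf, size_t size,
                     int *err, bool writing) {
    io_fn_t io = writing ? p->write : p->read; // choose the direction once
    size_t done = 0;
    while (size > 0) {
        size_t n = io(obj, buf, size, err);
        if (n == 0) {
            break; // EOF, or *err was set by the callback
        }
        done += n;
        buf = (char *)buf + n;
        size -= n;
    }
    return done;
}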
mp_uint_t mp_stream_rw(mp_obj_t stream, void *buf_, mp_uint_t size, int *errcode, byte flags) { byte *buf = buf_; - mp_obj_base_t* s = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream); typedef mp_uint_t (*io_func_t)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode); io_func_t io_func; - const mp_stream_p_t *stream_p = s->type->protocol; + const mp_stream_p_t *stream_p = mp_get_stream(stream); if (flags & MP_STREAM_RW_WRITE) { io_func = (io_func_t)stream_p->write; } else { @@ -99,8 +98,6 @@ const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags) { } STATIC mp_obj_t stream_read_generic(size_t n_args, const mp_obj_t *args, byte flags) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_READ); - // What to do if sz < -1? Python docs don't specify this case. // CPython does a readall, but here we silently let negatives through, // and they will cause a MemoryError. @@ -109,6 +106,8 @@ STATIC mp_obj_t stream_read_generic(size_t n_args, const mp_obj_t *args, byte fl return stream_readall(args[0]); } + const mp_stream_p_t *stream_p = mp_get_stream(args[0]); + #if MICROPY_PY_BUILTINS_STR_UNICODE if (stream_p->is_text) { // We need to read sz number of unicode characters. Because we don't have any @@ -227,8 +226,6 @@ STATIC mp_obj_t stream_read1(size_t n_args, const mp_obj_t *args) { MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read1_obj, 1, 2, stream_read1); mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len, byte flags) { - mp_get_stream_raise(self_in, MP_STREAM_OP_WRITE); - int error; mp_uint_t out_sz = mp_stream_rw(self_in, (void*)buf, len, &error, flags); if (error != 0) { @@ -244,7 +241,7 @@ mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len, byte fla } } -// XXX hack +// This is used to adapt a stream object to an mp_print_t interface void mp_stream_write_adaptor(void *self, const char *buf, size_t len) { mp_stream_write(MP_OBJ_FROM_PTR(self), buf, len, MP_STREAM_RW_WRITE); } @@ -276,7 +273,6 @@ STATIC mp_obj_t stream_write1_method(mp_obj_t self_in, mp_obj_t arg) { MP_DEFINE_CONST_FUN_OBJ_2(mp_stream_write1_obj, stream_write1_method); STATIC mp_obj_t stream_readinto(size_t n_args, const mp_obj_t *args) { - mp_get_stream_raise(args[0], MP_STREAM_OP_READ); mp_buffer_info_t bufinfo; mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE); @@ -305,7 +301,7 @@ STATIC mp_obj_t stream_readinto(size_t n_args, const mp_obj_t *args) { MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj, 2, 3, stream_readinto); STATIC mp_obj_t stream_readall(mp_obj_t self_in) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(self_in, MP_STREAM_OP_READ); + const mp_stream_p_t *stream_p = mp_get_stream(self_in); mp_uint_t total_size = 0; vstr_t vstr; @@ -346,7 +342,7 @@ STATIC mp_obj_t stream_readall(mp_obj_t self_in) { // Unbuffered, inefficient implementation of readline() for raw I/O files. 
STATIC mp_obj_t stream_unbuffered_readline(size_t n_args, const mp_obj_t *args) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_READ); + const mp_stream_p_t *stream_p = mp_get_stream(args[0]); mp_int_t max_size = -1; if (n_args > 1) { @@ -421,7 +417,7 @@ mp_obj_t mp_stream_unbuffered_iter(mp_obj_t self) { } mp_obj_t mp_stream_close(mp_obj_t stream) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(stream, MP_STREAM_OP_IOCTL); + const mp_stream_p_t *stream_p = mp_get_stream(stream); int error; mp_uint_t res = stream_p->ioctl(stream, MP_STREAM_CLOSE, 0, &error); if (res == MP_STREAM_ERROR) { @@ -432,8 +428,6 @@ mp_obj_t mp_stream_close(mp_obj_t stream) { MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_close_obj, mp_stream_close); STATIC mp_obj_t stream_seek(size_t n_args, const mp_obj_t *args) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_IOCTL); - struct mp_stream_seek_t seek_s; // TODO: Could be uint64 seek_s.offset = mp_obj_get_int(args[1]); @@ -447,6 +441,7 @@ STATIC mp_obj_t stream_seek(size_t n_args, const mp_obj_t *args) { mp_raise_OSError(MP_EINVAL); } + const mp_stream_p_t *stream_p = mp_get_stream(args[0]); int error; mp_uint_t res = stream_p->ioctl(args[0], MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &error); if (res == MP_STREAM_ERROR) { @@ -467,7 +462,7 @@ STATIC mp_obj_t stream_tell(mp_obj_t self) { MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_tell_obj, stream_tell); STATIC mp_obj_t stream_flush(mp_obj_t self) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(self, MP_STREAM_OP_IOCTL); + const mp_stream_p_t *stream_p = mp_get_stream(self); int error; mp_uint_t res = stream_p->ioctl(self, MP_STREAM_FLUSH, 0, &error); if (res == MP_STREAM_ERROR) { @@ -478,8 +473,6 @@ STATIC mp_obj_t stream_flush(mp_obj_t self) { MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_flush_obj, stream_flush); STATIC mp_obj_t stream_ioctl(size_t n_args, const mp_obj_t *args) { - const mp_stream_p_t *stream_p = mp_get_stream_raise(args[0], MP_STREAM_OP_IOCTL); - mp_buffer_info_t bufinfo; uintptr_t val = 0; if (n_args > 2) { @@ -490,6 +483,7 @@ STATIC mp_obj_t stream_ioctl(size_t n_args, const mp_obj_t *args) { } } + const mp_stream_p_t *stream_p = mp_get_stream(args[0]); int error; mp_uint_t res = stream_p->ioctl(args[0], mp_obj_get_int(args[1]), val, &error); if (res == MP_STREAM_ERROR) { @@ -513,10 +507,10 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_ioctl_obj, 2, 3, stream_ioctl); // status, this variable will contain error no. 
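Note that seek, tell, flush, and close above never call separate driver entry points: each is a request code (MP_STREAM_SEEK, MP_STREAM_FLUSH, MP_STREAM_CLOSE) funneled through the protocol's single ioctl hook. A minimal sketch of that multiplexing pattern, with made-up request codes and names:

#include <stdint.h>

enum { REQ_FLUSH = 1, REQ_SEEK = 2, REQ_CLOSE = 3 };

typedef struct {
    intptr_t (*ioctl)(void *obj, unsigned req, intptr_t arg, int *err);
} sproto_t;

// One driver hook services many operations by switching on the request code.
static intptr_t demo_ioctl(void *obj, unsigned req, intptr_t arg, int *err) {
    (void)obj;
    (void)arg;
    switch (req) {
        case REQ_FLUSH:
            return 0; // nothing buffered in this demo
        case REQ_CLOSE:
            return 0; // release resources here
        default:
            *err = 22; // EINVAL: request not supported by this stream
            return -1;
    }
}

// usage: sproto_t p = { demo_ioctl }; int err = 0; p.ioctl(NULL, REQ_FLUSH, 0, &err);

Adding a new operation, like the MP_STREAM_GET_FILENO code below, then means defining a request value rather than widening the protocol struct.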
int mp_stream_errno; -ssize_t mp_stream_posix_write(mp_obj_t stream, const void *buf, size_t len) { - mp_obj_base_t* o = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream); +ssize_t mp_stream_posix_write(void *stream, const void *buf, size_t len) { + mp_obj_base_t* o = stream; const mp_stream_p_t *stream_p = o->type->protocol; - mp_uint_t out_sz = stream_p->write(stream, buf, len, &mp_stream_errno); + mp_uint_t out_sz = stream_p->write(MP_OBJ_FROM_PTR(stream), buf, len, &mp_stream_errno); if (out_sz == MP_STREAM_ERROR) { return -1; } else { @@ -524,10 +518,10 @@ ssize_t mp_stream_posix_write(mp_obj_t stream, const void *buf, size_t len) { } } -ssize_t mp_stream_posix_read(mp_obj_t stream, void *buf, size_t len) { - mp_obj_base_t* o = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream); +ssize_t mp_stream_posix_read(void *stream, void *buf, size_t len) { + mp_obj_base_t* o = stream; const mp_stream_p_t *stream_p = o->type->protocol; - mp_uint_t out_sz = stream_p->read(stream, buf, len, &mp_stream_errno); + mp_uint_t out_sz = stream_p->read(MP_OBJ_FROM_PTR(stream), buf, len, &mp_stream_errno); if (out_sz == MP_STREAM_ERROR) { return -1; } else { @@ -535,23 +529,23 @@ ssize_t mp_stream_posix_read(mp_obj_t stream, void *buf, size_t len) { } } -off_t mp_stream_posix_lseek(mp_obj_t stream, off_t offset, int whence) { - const mp_obj_base_t* o = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream); +off_t mp_stream_posix_lseek(void *stream, off_t offset, int whence) { + const mp_obj_base_t* o = stream; const mp_stream_p_t *stream_p = o->type->protocol; struct mp_stream_seek_t seek_s; seek_s.offset = offset; seek_s.whence = whence; - mp_uint_t res = stream_p->ioctl(stream, MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &mp_stream_errno); + mp_uint_t res = stream_p->ioctl(MP_OBJ_FROM_PTR(stream), MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &mp_stream_errno); if (res == MP_STREAM_ERROR) { return -1; } return seek_s.offset; } -int mp_stream_posix_fsync(mp_obj_t stream) { - mp_obj_base_t* o = (mp_obj_base_t*)MP_OBJ_TO_PTR(stream); +int mp_stream_posix_fsync(void *stream) { + mp_obj_base_t* o = stream; const mp_stream_p_t *stream_p = o->type->protocol; - mp_uint_t res = stream_p->ioctl(stream, MP_STREAM_FLUSH, 0, &mp_stream_errno); + mp_uint_t res = stream_p->ioctl(MP_OBJ_FROM_PTR(stream), MP_STREAM_FLUSH, 0, &mp_stream_errno); if (res == MP_STREAM_ERROR) { return -1; } diff --git a/python/src/py/stream.h b/python/src/py/stream.h index a1d1c4f8a..b6019bb38 100644 --- a/python/src/py/stream.h +++ b/python/src/py/stream.h @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014-2016 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -41,6 +42,7 @@ #define MP_STREAM_SET_OPTS (7) // Set stream options #define MP_STREAM_GET_DATA_OPTS (8) // Get data/message options #define MP_STREAM_SET_DATA_OPTS (9) // Set data/message options +#define MP_STREAM_GET_FILENO (10) // Get fileno of underlying file // These poll ioctl values are compatible with Linux #define MP_STREAM_POLL_RD (0x0001) @@ -62,6 +64,16 @@ struct mp_stream_seek_t { #define MP_SEEK_CUR (1) #define MP_SEEK_END (2) +// Stream protocol +typedef struct _mp_stream_p_t { + // On error, functions should return MP_STREAM_ERROR and fill in *errcode (values + // are implementation-dependent, but will be exposed to user, e.g. via exception). 
+ mp_uint_t (*read)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode); + mp_uint_t (*write)(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode); + mp_uint_t (*ioctl)(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode); + mp_uint_t is_text : 1; // default is bytes, set this for text stream +} mp_stream_p_t; + MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read1_obj); MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj); @@ -80,6 +92,11 @@ MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_ioctl_obj); #define MP_STREAM_OP_WRITE (2) #define MP_STREAM_OP_IOCTL (4) +// Object is assumed to have a non-NULL stream protocol with valid r/w/ioctl methods +static inline const mp_stream_p_t *mp_get_stream(mp_const_obj_t self) { + return (const mp_stream_p_t*)((const mp_obj_base_t*)MP_OBJ_TO_PTR(self))->type->protocol; +} + const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags); mp_obj_t mp_stream_close(mp_obj_t stream); @@ -100,10 +117,11 @@ void mp_stream_write_adaptor(void *self, const char *buf, size_t len); #if MICROPY_STREAMS_POSIX_API // Functions with POSIX-compatible signatures -ssize_t mp_stream_posix_write(mp_obj_t stream, const void *buf, size_t len); -ssize_t mp_stream_posix_read(mp_obj_t stream, void *buf, size_t len); -off_t mp_stream_posix_lseek(mp_obj_t stream, off_t offset, int whence); -int mp_stream_posix_fsync(mp_obj_t stream); +// "stream" is assumed to be a pointer to a concrete object with the stream protocol +ssize_t mp_stream_posix_write(void *stream, const void *buf, size_t len); +ssize_t mp_stream_posix_read(void *stream, void *buf, size_t len); +off_t mp_stream_posix_lseek(void *stream, off_t offset, int whence); +int mp_stream_posix_fsync(void *stream); #endif #if MICROPY_STREAMS_NON_BLOCK diff --git a/python/src/py/unicode.c b/python/src/py/unicode.c index 935dc9012..d69b6f56f 100644 --- a/python/src/py/unicode.c +++ b/python/src/py/unicode.c @@ -180,7 +180,7 @@ bool utf8_check(const byte *p, size_t len) { for (; p < end; p++) { byte c = *p; if (need) { - if (c >= 0x80) { + if (UTF8_IS_CONT(c)) { need--; } else { // mismatch diff --git a/python/src/py/vm.c b/python/src/py/vm.c index 8abe65e43..260a7f38b 100644 --- a/python/src/py/vm.c +++ b/python/src/py/vm.c @@ -3,8 +3,8 @@ * * The MIT License (MIT) * - * Copyright (c) 2013, 2014 Damien P. George - * Copyright (c) 2014 Paul Sokolovsky + * Copyright (c) 2013-2019 Damien P. 
George + * Copyright (c) 2014-2015 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -100,13 +100,11 @@ DECODE_ULABEL; /* except labels are always forward */ \ ++exc_sp; \ exc_sp->handler = ip + ulab; \ - exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1) | currently_in_except_block); \ + exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1)); \ exc_sp->prev_exc = NULL; \ - currently_in_except_block = 0; /* in a try block now */ \ } while (0) #define POP_EXC_BLOCK() \ - currently_in_except_block = MP_TAGPTR_TAG0(exc_sp->val_sp); /* restore previous state */ \ exc_sp--; /* pop back to previous exception handler */ \ CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in 1st place */ @@ -115,7 +113,7 @@ // returns: // MP_VM_RETURN_NORMAL, sp valid, return value in *sp // MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp -// MP_VM_RETURN_EXCEPTION, exception in fastn[0] +// MP_VM_RETURN_EXCEPTION, exception in state[0] mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp_obj_t inject_exc) { #define SELECTIVE_EXC_IP (0) #if SELECTIVE_EXC_IP @@ -136,7 +134,7 @@ mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp #define ENTRY(op) entry_##op #define ENTRY_DEFAULT entry_default #else - #define DISPATCH() break + #define DISPATCH() goto dispatch_loop #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check #define ENTRY(op) case op #define ENTRY_DEFAULT default @@ -161,7 +159,6 @@ run_code_state: ; } // variables that are visible to the exception handler (declared volatile) - volatile bool currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions mp_exc_stack_t *volatile exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack #if MICROPY_PY_THREAD_GIL && MICROPY_PY_THREAD_GIL_VM_DIVISOR @@ -336,7 +333,7 @@ dispatch_loop: MARK_EXC_IP_SELECTIVE(); DECODE_QSTR; mp_obj_t top = TOP(); - if (mp_obj_get_type(top)->attr == mp_obj_instance_attr) { + if (mp_obj_is_instance_type(mp_obj_get_type(top))) { mp_obj_instance_t *self = MP_OBJ_TO_PTR(top); mp_uint_t x = *ip; mp_obj_t key = MP_OBJ_NEW_QSTR(qst); @@ -434,7 +431,7 @@ dispatch_loop: MARK_EXC_IP_SELECTIVE(); DECODE_QSTR; mp_obj_t top = TOP(); - if (mp_obj_get_type(top)->attr == mp_obj_instance_attr && sp[-1] != MP_OBJ_NULL) { + if (mp_obj_is_instance_type(mp_obj_get_type(top)) && sp[-1] != MP_OBJ_NULL) { mp_obj_instance_t *self = MP_OBJ_TO_PTR(top); mp_uint_t x = *ip; mp_obj_t key = MP_OBJ_NEW_QSTR(qst); @@ -604,7 +601,7 @@ dispatch_loop: sp -= 2; mp_call_method_n_kw(3, 0, sp); SET_TOP(mp_const_none); - } else if (MP_OBJ_IS_SMALL_INT(TOP())) { + } else if (mp_obj_is_small_int(TOP())) { // Getting here there are two distinct cases: // - unwind return, stack: (..., __exit__, ctx_mgr, ret_val, SMALL_INT(-1)) // - unwind jump, stack: (..., __exit__, ctx_mgr, dest_ip, SMALL_INT(num_exc)) @@ -633,8 +630,6 @@ dispatch_loop: // replacing it with None, which signals END_FINALLY to just // execute the finally handler normally. SET_TOP(mp_const_none); - assert(exc_sp >= exc_stack); - POP_EXC_BLOCK(); } else { // We need to re-raise the exception. We pop __exit__ handler // by copying the exception instance down to the new top-of-stack. 
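The exc_sp bookkeeping above rests on tagged pointers: each exception-stack entry saves the value-stack pointer with flag bits packed into its low bits, which alignment guarantees are zero. This patch retires tag bit 0 (the old currently_in_except_block flag) and keeps only the with/finally flag in bit 1; whether the VM is inside an active handler is instead derived by comparing exc_sp->handler against ip, as the later hunks do. A sketch of the encoding (mirroring the MP_TAGPTR_* idea, not necessarily MicroPython's exact definitions):

#include <assert.h>
#include <stdint.h>

// Word-aligned pointers end in zero bits, so two flags can ride along.
#define TAGPTR_MAKE(ptr, tag) ((void *)((uintptr_t)(ptr) | (uintptr_t)(tag)))
#define TAGPTR_PTR(x)         ((void *)((uintptr_t)(x) & ~(uintptr_t)3))
#define TAGPTR_TAG1(x)        ((uintptr_t)(x) & 2)

int main(void) {
    static int stack_slot;                           // stand-in for a value-stack pointer
    void *tagged = TAGPTR_MAKE(&stack_slot, 1 << 1); // mark this handler as a finally block
    assert(TAGPTR_PTR(tagged) == (void *)&stack_slot);
    assert(TAGPTR_TAG1(tagged) != 0); // the flag survives, the pointer is recoverable
    return 0;
}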
@@ -654,7 +649,7 @@ unwind_jump:; while ((unum & 0x7f) > 0) { unum -= 1; assert(exc_sp >= exc_stack); - if (MP_TAGPTR_TAG1(exc_sp->val_sp)) { + if (MP_TAGPTR_TAG1(exc_sp->val_sp) && exc_sp->handler > ip) { // Getting here the stack looks like: // (..., X, dest_ip) // where X is pointed to by exc_sp->val_sp and in the case @@ -680,7 +675,6 @@ unwind_jump:; DISPATCH_WITH_PEND_EXC_CHECK(); } - // matched against: POP_BLOCK or POP_EXCEPT (anything else?) ENTRY(MP_BC_SETUP_EXCEPT): ENTRY(MP_BC_SETUP_FINALLY): { MARK_EXC_IP_SELECTIVE(); @@ -698,8 +692,10 @@ unwind_jump:; // if TOS is an integer, finishes coroutine and returns control to caller // if TOS is an exception, reraises the exception if (TOP() == mp_const_none) { + assert(exc_sp >= exc_stack); + POP_EXC_BLOCK(); sp--; - } else if (MP_OBJ_IS_SMALL_INT(TOP())) { + } else if (mp_obj_is_small_int(TOP())) { // We finished "finally" coroutine and now dispatch back // to our caller, based on TOS value mp_int_t cause = MP_OBJ_SMALL_INT_VALUE(POP()); @@ -761,19 +757,13 @@ unwind_jump:; DISPATCH(); } - // matched against: SETUP_EXCEPT, SETUP_FINALLY, SETUP_WITH - ENTRY(MP_BC_POP_BLOCK): - // we are exiting an exception handler, so pop the last one of the exception-stack + ENTRY(MP_BC_POP_EXCEPT_JUMP): { assert(exc_sp >= exc_stack); POP_EXC_BLOCK(); - DISPATCH(); - - // matched against: SETUP_EXCEPT - ENTRY(MP_BC_POP_EXCEPT): - assert(exc_sp >= exc_stack); - assert(currently_in_except_block); - POP_EXC_BLOCK(); - DISPATCH(); + DECODE_ULABEL; + ip += ulab; + DISPATCH_WITH_PEND_EXC_CHECK(); + } ENTRY(MP_BC_BUILD_TUPLE): { MARK_EXC_IP_SELECTIVE(); @@ -817,17 +807,14 @@ unwind_jump:; #if MICROPY_PY_BUILTINS_SLICE ENTRY(MP_BC_BUILD_SLICE): { MARK_EXC_IP_SELECTIVE(); - DECODE_UINT; - if (unum == 2) { - mp_obj_t stop = POP(); - mp_obj_t start = TOP(); - SET_TOP(mp_obj_new_slice(start, stop, mp_const_none)); - } else { - mp_obj_t step = POP(); - mp_obj_t stop = POP(); - mp_obj_t start = TOP(); - SET_TOP(mp_obj_new_slice(start, stop, step)); + mp_obj_t step = mp_const_none; + if (*ip++ == 3) { + // 3-argument slice includes step + step = POP(); } + mp_obj_t stop = POP(); + mp_obj_t start = TOP(); + SET_TOP(mp_obj_new_slice(start, stop, step)); DISPATCH(); } #endif @@ -909,7 +896,7 @@ unwind_jump:; if (mp_obj_get_type(*sp) == &mp_type_fun_bc) { code_state->ip = ip; code_state->sp = sp; - code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block); + code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, 0); mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1); #if !MICROPY_ENABLE_PYSTACK if (new_state == NULL) { @@ -945,7 +932,7 @@ unwind_jump:; if (mp_obj_get_type(*sp) == &mp_type_fun_bc) { code_state->ip = ip; code_state->sp = sp; - code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block); + code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, 0); mp_call_args_t out_args; mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args); @@ -988,7 +975,7 @@ unwind_jump:; if (mp_obj_get_type(*sp) == &mp_type_fun_bc) { code_state->ip = ip; code_state->sp = sp; - code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block); + code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, 0); size_t n_args = unum & 0xff; size_t n_kw = (unum >> 8) & 0xff; @@ -1028,7 +1015,7 @@ unwind_jump:; if (mp_obj_get_type(*sp) == &mp_type_fun_bc) { code_state->ip = ip; code_state->sp = sp; - code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block); + code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, 0); mp_call_args_t 
out_args; mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args); @@ -1063,17 +1050,11 @@ unwind_jump:; ENTRY(MP_BC_RETURN_VALUE): MARK_EXC_IP_SELECTIVE(); - // These next 3 lines pop a try-finally exception handler, if one - // is there on the exception stack. Without this the finally block - // is executed a second time when the return is executed, because - // the try-finally exception handler is still on the stack. - // TODO Possibly find a better way to handle this case. - if (currently_in_except_block) { - POP_EXC_BLOCK(); - } unwind_return: + // Search for and execute finally handlers that aren't already active while (exc_sp >= exc_stack) { - if (MP_TAGPTR_TAG1(exc_sp->val_sp)) { + if (MP_TAGPTR_TAG1(exc_sp->val_sp) && exc_sp->handler > ip) { + // Found a finally handler that isn't active. // Getting here the stack looks like: // (..., X, [iter0, iter1, ...,] ret_val) // where X is pointed to by exc_sp->val_sp and in the case @@ -1092,10 +1073,10 @@ unwind_return: // done (when WITH_CLEANUP or END_FINALLY reached). PUSH(MP_OBJ_NEW_SMALL_INT(-1)); ip = exc_sp->handler; - exc_sp--; + POP_EXC_BLOCK(); goto dispatch_loop; } - exc_sp--; + POP_EXC_BLOCK(); } nlr_pop(); code_state->sp = sp; @@ -1125,7 +1106,7 @@ unwind_return: mp_uint_t unum = *ip; mp_obj_t obj; if (unum == 2) { - mp_warning("exception chaining not supported"); + mp_warning(NULL, "exception chaining not supported"); // ignore (pop) "from" argument sp--; } @@ -1154,14 +1135,14 @@ yield: nlr_pop(); code_state->ip = ip; code_state->sp = sp; - code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, currently_in_except_block); + code_state->exc_sp = MP_TAGPTR_MAKE(exc_sp, 0); return MP_VM_RETURN_YIELD; ENTRY(MP_BC_YIELD_FROM): { MARK_EXC_IP_SELECTIVE(); -//#define EXC_MATCH(exc, type) MP_OBJ_IS_TYPE(exc, type) +//#define EXC_MATCH(exc, type) mp_obj_is_type(exc, type) #define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type) -#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { RAISE(t); } +#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { mp_obj_t raise_t = mp_make_raise_obj(t); RAISE(raise_t); } mp_vm_return_kind_t ret_kind; mp_obj_t send_value = POP(); mp_obj_t t_exc = MP_OBJ_NULL; @@ -1270,10 +1251,10 @@ yield: } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) { fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP(); DISPATCH(); - } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + 7) { + } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NUM_BYTECODE) { SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP())); DISPATCH(); - } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + 36) { + } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BINARY_OP_NUM_BYTECODE) { mp_obj_t rhs = POP(); mp_obj_t lhs = TOP(); SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs)); @@ -1283,7 +1264,7 @@ yield: { mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, "byte code not implemented"); nlr_pop(); - fastn[0] = obj; + code_state->state[0] = obj; return MP_VM_RETURN_EXCEPTION; } @@ -1322,11 +1303,12 @@ pending_exception_check: #if MICROPY_PY_THREAD_GIL #if MICROPY_PY_THREAD_GIL_VM_DIVISOR - if (--gil_divisor == 0) { - gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR; - #else - { + if (--gil_divisor == 0) #endif + { + #if MICROPY_PY_THREAD_GIL_VM_DIVISOR + gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR; + #endif #if MICROPY_ENABLE_SCHEDULER // can only switch threads if the scheduler is unlocked if 
(MP_STATE_VM(sched_state) == MP_SCHED_IDLE) @@ -1427,7 +1409,8 @@ unwind_loop: mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name); } - while (currently_in_except_block) { + while (exc_sp >= exc_stack && exc_sp->handler <= code_state->ip) { + // nested exception assert(exc_sp >= exc_stack); @@ -1440,9 +1423,6 @@ unwind_loop: } if (exc_sp >= exc_stack) { - // set flag to indicate that we are now handling an exception - currently_in_except_block = 1; - // catch exception and pass to byte code code_state->ip = exc_sp->handler; mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp); @@ -1468,15 +1448,14 @@ unwind_loop: fastn = &code_state->state[n_state - 1]; exc_stack = (mp_exc_stack_t*)(code_state->state + n_state); // variables that are visible to the exception handler (declared volatile) - currently_in_except_block = MP_TAGPTR_TAG0(code_state->exc_sp); // 0 or 1, to detect nested exceptions exc_sp = MP_TAGPTR_PTR(code_state->exc_sp); // stack grows up, exc_sp points to top of stack goto unwind_loop; #endif } else { // propagate exception to higher level - // TODO what to do about ip and sp? they don't really make sense at this point - fastn[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // must put exception here because sp is invalid + // Note: ip and sp don't have usable values at this point + code_state->state[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // put exception here because sp is invalid return MP_VM_RETURN_EXCEPTION; } } diff --git a/python/src/py/vmentrytable.h b/python/src/py/vmentrytable.h index 615f4e2ce..641c8ee42 100644 --- a/python/src/py/vmentrytable.h +++ b/python/src/py/vmentrytable.h @@ -76,8 +76,7 @@ static const void *const entry_table[256] = { [MP_BC_GET_ITER] = &&entry_MP_BC_GET_ITER, [MP_BC_GET_ITER_STACK] = &&entry_MP_BC_GET_ITER_STACK, [MP_BC_FOR_ITER] = &&entry_MP_BC_FOR_ITER, - [MP_BC_POP_BLOCK] = &&entry_MP_BC_POP_BLOCK, - [MP_BC_POP_EXCEPT] = &&entry_MP_BC_POP_EXCEPT, + [MP_BC_POP_EXCEPT_JUMP] = &&entry_MP_BC_POP_EXCEPT_JUMP, [MP_BC_BUILD_TUPLE] = &&entry_MP_BC_BUILD_TUPLE, [MP_BC_BUILD_LIST] = &&entry_MP_BC_BUILD_LIST, [MP_BC_BUILD_MAP] = &&entry_MP_BC_BUILD_MAP, diff --git a/python/src/py/vstr.c b/python/src/py/vstr.c index 869b27805..6f480186e 100644 --- a/python/src/py/vstr.c +++ b/python/src/py/vstr.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2013, 2014 Damien P. George + * Copyright (c) 2014 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal diff --git a/python/src/py/warning.c b/python/src/py/warning.c index 12d0f9c99..cba21ba6c 100644 --- a/python/src/py/warning.c +++ b/python/src/py/warning.c @@ -4,6 +4,7 @@ * The MIT License (MIT) * * Copyright (c) 2014 Damien P. George + * Copyright (c) 2015-2018 Paul Sokolovsky * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -32,10 +33,15 @@ #if MICROPY_WARNINGS -void mp_warning(const char *msg, ...) { +void mp_warning(const char *category, const char *msg, ...) 
{ + if (category == NULL) { + category = "Warning"; + } + mp_print_str(MICROPY_ERROR_PRINTER, category); + mp_print_str(MICROPY_ERROR_PRINTER, ": "); + va_list args; va_start(args, msg); - mp_print_str(MICROPY_ERROR_PRINTER, "Warning: "); mp_vprintf(MICROPY_ERROR_PRINTER, msg, args); mp_print_str(MICROPY_ERROR_PRINTER, "\n"); va_end(args); @@ -43,7 +49,7 @@ void mp_warning(const char *msg, ...) { void mp_emitter_warning(pass_kind_t pass, const char *msg) { if (pass == MP_PASS_CODE_SIZE) { - mp_warning(msg, NULL); + mp_warning(NULL, msg); } }
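mp_warning thus gains a category argument, printed as the message prefix and defaulting to "Warning" when NULL is passed (which is what mp_emitter_warning now relies on). The same variadic pattern as a standalone sketch over stdio rather than mp_print:

#include <stdarg.h>
#include <stdio.h>

// Print "Category: message\n", defaulting the category when NULL is passed.
static void warn(const char *category, const char *msg, ...) {
    if (category == NULL) {
        category = "Warning";
    }
    fprintf(stderr, "%s: ", category);
    va_list args;
    va_start(args, msg);
    vfprintf(stderr, msg, args);
    va_end(args);
    fputc('\n', stderr);
}

int main(void) {
    warn(NULL, "exception chaining not supported");          // -> Warning: ...
    warn("DeprecationWarning", "'%s' is deprecated", "foo"); // -> DeprecationWarning: ...
    return 0;
}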