Mirror of https://github.com/UpsilonNumworks/Upsilon.git, synced 2026-01-19 16:57:31 +01:00
Update MicroPython from 1.12 to 1.17
@@ -135,7 +135,7 @@

#define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) do { \
*emit->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \
} while (0)
} while (0)

typedef enum {
STACK_VALUE,
@@ -163,15 +163,25 @@ typedef enum {

STATIC qstr vtype_to_qstr(vtype_kind_t vtype) {
switch (vtype) {
case VTYPE_PYOBJ: return MP_QSTR_object;
case VTYPE_BOOL: return MP_QSTR_bool;
case VTYPE_INT: return MP_QSTR_int;
case VTYPE_UINT: return MP_QSTR_uint;
case VTYPE_PTR: return MP_QSTR_ptr;
case VTYPE_PTR8: return MP_QSTR_ptr8;
case VTYPE_PTR16: return MP_QSTR_ptr16;
case VTYPE_PTR32: return MP_QSTR_ptr32;
case VTYPE_PTR_NONE: default: return MP_QSTR_None;
case VTYPE_PYOBJ:
return MP_QSTR_object;
case VTYPE_BOOL:
return MP_QSTR_bool;
case VTYPE_INT:
return MP_QSTR_int;
case VTYPE_UINT:
return MP_QSTR_uint;
case VTYPE_PTR:
return MP_QSTR_ptr;
case VTYPE_PTR8:
return MP_QSTR_ptr8;
case VTYPE_PTR16:
return MP_QSTR_ptr16;
case VTYPE_PTR32:
return MP_QSTR_ptr32;
case VTYPE_PTR_NONE:
default:
return MP_QSTR_None;
}
}

@@ -201,6 +211,7 @@ struct _emit_t {
int pass;

bool do_viper_types;
bool prelude_offset_uses_u16_encoding;

mp_uint_t local_vtype_alloc;
vtype_kind_t *local_vtype;
@@ -244,7 +255,7 @@ STATIC void emit_native_global_exc_entry(emit_t *emit);
STATIC void emit_native_global_exc_exit(emit_t *emit);
STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj);

emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels) {
emit_t *EXPORT_FUN(new)(mp_obj_t * error_slot, uint *label_slot, mp_uint_t max_num_labels) {
emit_t *emit = m_new0(emit_t, 1);
emit->error_slot = error_slot;
emit->label_slot = label_slot;
@@ -257,7 +268,7 @@ emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_nu
return emit;
}

void EXPORT_FUN(free)(emit_t *emit) {
void EXPORT_FUN(free)(emit_t * emit) {
mp_asm_base_deinit(&emit->as->base, false);
m_del_obj(ASM_T, emit->as);
m_del(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc);
@@ -329,6 +340,18 @@ STATIC void emit_native_mov_reg_qstr_obj(emit_t *emit, int reg_dest, qstr qst) {
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)

#define emit_native_mov_state_imm_fix_u16_via(emit, local_num, imm, reg_temp) \
do { \
ASM_MOV_REG_IMM_FIX_U16((emit)->as, (reg_temp), (imm)); \
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)

#define emit_native_mov_state_imm_fix_word_via(emit, local_num, imm, reg_temp) \
do { \
ASM_MOV_REG_IMM_FIX_WORD((emit)->as, (reg_temp), (imm)); \
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)

STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope);
@@ -539,16 +562,27 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);

// Set code_state.ip (offset from start of this function to prelude info)
int code_state_ip_local = emit->code_state_start + OFFSETOF_CODE_STATE_IP;
#if N_PRELUDE_AS_BYTES_OBJ
// Prelude is a bytes object in const_table; store ip = prelude->data - fun_bc->bytecode
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, emit->scope->num_pos_args + emit->scope->num_kwonly_args + 1);
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, offsetof(mp_obj_str_t, data) / sizeof(uintptr_t));
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_PARENT_ARG_1, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_BYTECODE);
ASM_SUB_REG_REG(emit->as, REG_LOCAL_3, REG_PARENT_ARG_1);
emit_native_mov_state_reg(emit, emit->code_state_start + OFFSETOF_CODE_STATE_IP, REG_LOCAL_3);
emit_native_mov_state_reg(emit, code_state_ip_local, REG_LOCAL_3);
#else
// TODO this encoding may change size in the final pass, need to make it fixed
emit_native_mov_state_imm_via(emit, emit->code_state_start + OFFSETOF_CODE_STATE_IP, emit->prelude_offset, REG_PARENT_ARG_1);
if (emit->pass == MP_PASS_CODE_SIZE) {
// Commit to the encoding size based on the value of prelude_offset in this pass.
// By using 32768 as the cut-off it is highly unlikely that prelude_offset will
// grow beyond 65535 by the end of this pass, and so require the larger encoding.
emit->prelude_offset_uses_u16_encoding = emit->prelude_offset < 32768;
}
if (emit->prelude_offset_uses_u16_encoding) {
assert(emit->prelude_offset <= 65535);
emit_native_mov_state_imm_fix_u16_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
} else {
emit_native_mov_state_imm_fix_word_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
}
#endif

// Set code_state.n_state (only works on little endian targets due to n_state being uint16_t)
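The hunk above deals with the fact that the prelude offset is not final until the last emitter pass. A minimal standalone sketch of the idea follows (hypothetical helper names, not part of the commit): the size pass commits to the compact 16-bit encoding only when the offset still has ample headroom below the 65535 limit, and the final pass checks that the commitment still holds.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the cut-off rule: commit to the u16 encoding
 * only if the offset measured in the size pass is below 32768, leaving room
 * for it to grow without exceeding 65535 by the final pass. */
static bool commit_to_u16(uint32_t offset_in_size_pass) {
    return offset_in_size_pass < 32768;
}

int main(void) {
    uint32_t size_pass_offset = 20000;  /* value seen during the size pass */
    bool uses_u16 = commit_to_u16(size_pass_offset);
    uint32_t final_offset = 21000;      /* may have grown by the final pass */
    if (uses_u16) {
        assert(final_offset <= 65535);  /* committed encoding must still fit */
    }
    printf("u16 encoding: %s\n", uses_u16 ? "yes" : "no");
    return 0;
}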
@@ -733,14 +767,14 @@ STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) {
emit->scope->stack_size = emit->stack_size;
}
#ifdef DEBUG_PRINT
#ifdef DEBUG_PRINT
DEBUG_printf(" adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
}
DEBUG_printf("\n");
#endif
#endif
}

STATIC void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) {
@@ -807,10 +841,13 @@ STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
}
}

// Ensures all unsettled registers that hold Python values are copied to the
// concrete Python stack. All registers are then free to use.
STATIC void need_reg_all(emit_t *emit) {
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
si->kind = STACK_VALUE;
emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
}
@@ -831,29 +868,27 @@ STATIC vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_i
} else if (si->vtype == VTYPE_PTR_NONE) {
emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_NONE_OBJ);
} else {
mp_raise_NotImplementedError("conversion to object");
mp_raise_NotImplementedError(MP_ERROR_TEXT("conversion to object"));
}
return VTYPE_PYOBJ;
}
}

// Copies all unsettled registers and immediates that are Python values into the
// concrete Python stack. This ensures the concrete Python stack holds valid
// values for the current stack_size.
// This function may clobber REG_TEMP1.
STATIC void need_stack_settled(emit_t *emit) {
DEBUG_printf(" need_stack_settled; stack_size=%d\n", emit->stack_size);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
si->kind = STACK_VALUE;
emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
}
}
need_reg_all(emit);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_IMM) {
DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
si->kind = STACK_VALUE;
si->vtype = load_reg_stack_imm(emit, REG_TEMP0, si, false);
emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP0);
// using REG_TEMP1 to avoid clobbering REG_TEMP0 (aka REG_RET)
si->vtype = load_reg_stack_imm(emit, REG_TEMP1, si, false);
emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP1);
}
}
}
@@ -1132,8 +1167,8 @@ STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) {

bool is_finally = false;
if (emit->exc_stack_size > 0) {
exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
is_finally = e->is_finally && e->label == l;
exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
is_finally = e->is_finally && e->label == l;
}

if (is_finally) {
@@ -1405,7 +1440,7 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
DEBUG_printf("load_fast(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
vtype_kind_t vtype = emit->local_vtype[local_num];
if (vtype == VTYPE_UNBOUND) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "local '%q' used before type known", qst);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("local '%q' used before type known"), qst);
}
emit_native_pre(emit);
if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) {
@@ -1580,7 +1615,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load from '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
}
} else {
// index is not an immediate
@@ -1590,7 +1625,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load with '%q' index", vtype_to_qstr(vtype_index));
MP_ERROR_TEXT("can't load with '%q' index"), vtype_to_qstr(vtype_index));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1618,7 +1653,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load from '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
}
}
emit_post_push_reg(emit, VTYPE_INT, REG_RET);
@@ -1642,7 +1677,7 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
} else if (emit->local_vtype[local_num] != vtype) {
// type of local is not the same as object stored in it
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"local '%q' has type '%q' but source is '%q'",
MP_ERROR_TEXT("local '%q' has type '%q' but source is '%q'"),
qst, vtype_to_qstr(emit->local_vtype[local_num]), vtype_to_qstr(vtype));
}
}
@@ -1735,7 +1770,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
int reg_index = REG_ARG_2;
int reg_value = REG_ARG_3;
emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_value);
#if N_X86
#if N_X64 || N_X86
// special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
emit_pre_pop_reg(emit, &vtype_value, reg_value);
#else
@@ -1743,7 +1778,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
#endif
if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store '%q'", vtype_to_qstr(vtype_value));
MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1809,7 +1844,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store to '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
}
} else {
// index is not an immediate
@@ -1820,9 +1855,9 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store with '%q' index", vtype_to_qstr(vtype_index));
MP_ERROR_TEXT("can't store with '%q' index"), vtype_to_qstr(vtype_index));
}
#if N_X86
#if N_X64 || N_X86
// special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
emit_pre_pop_reg(emit, &vtype_value, reg_value);
#else
@@ -1830,7 +1865,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
#endif
if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store '%q'", vtype_to_qstr(vtype_value));
MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1870,7 +1905,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store to '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
}
}
@@ -1992,7 +2027,7 @@ STATIC void emit_native_jump_helper(emit_t *emit, bool cond, mp_uint_t label, bo
}
if (!(vtype == VTYPE_BOOL || vtype == VTYPE_INT || vtype == VTYPE_UINT)) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't implicitly convert '%q' to 'bool'", vtype_to_qstr(vtype));
MP_ERROR_TEXT("can't implicitly convert '%q' to 'bool'"), vtype_to_qstr(vtype));
}
}
// For non-pop need to save the vtype so that emit_native_adjust_stack_size
@@ -2058,7 +2093,7 @@ STATIC void emit_native_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t exc
ASM_MOV_REG_PCREL(emit->as, REG_RET, label & ~MP_EMIT_BREAK_FROM_FOR);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_RET);
// Cancel any active exception (see also emit_native_pop_except_jump)
emit_native_mov_reg_const(emit, REG_RET, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_RET, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_RET);
// Jump to the innermost active finally
label = first_finally->label;
@@ -2153,9 +2188,8 @@ STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {

ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); // get exc

// Check if exc is None and jump to non-exc handler if it is
emit_native_mov_reg_const(emit, REG_ARG_2, MP_F_CONST_NONE_OBJ);
ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_2, *emit->label_slot + 2);
// Check if exc is MP_OBJ_NULL (i.e. zero) and jump to non-exc handler if it is
ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, *emit->label_slot + 2, false);

ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc)
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc)
@@ -2175,9 +2209,9 @@ STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
emit_call(emit, MP_F_OBJ_IS_TRUE);
ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot + 1, true);

// Replace exception with None
// Replace exception with MP_OBJ_NULL.
emit_native_label_assign(emit, *emit->label_slot);
emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);

// end of with cleanup nlr_catch block
@@ -2255,7 +2289,7 @@ STATIC void emit_native_for_iter_end(emit_t *emit) {
STATIC void emit_native_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) {
if (within_exc_handler) {
// Cancel any active exception so subsequent handlers don't see it
emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
} else {
emit_native_leave_exc_stack(emit, false);
@@ -2272,7 +2306,7 @@ STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
} else {
adjust_stack(emit, 1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"unary op %q not implemented", mp_unary_op_method_name[op]);
MP_ERROR_TEXT("unary op %q not implemented"), mp_unary_op_method_name[op]);
}
}
@@ -2280,7 +2314,8 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
DEBUG_printf("binary_op(" UINT_FMT ")\n", op);
vtype_kind_t vtype_lhs = peek_vtype(emit, 1);
vtype_kind_t vtype_rhs = peek_vtype(emit, 0);
if (vtype_lhs == VTYPE_INT && vtype_rhs == VTYPE_INT) {
if ((vtype_lhs == VTYPE_INT || vtype_lhs == VTYPE_UINT)
&& (vtype_rhs == VTYPE_INT || vtype_rhs == VTYPE_UINT)) {
// for integers, inplace and normal ops are equivalent, so use just normal ops
if (MP_BINARY_OP_INPLACE_OR <= op && op <= MP_BINARY_OP_INPLACE_POWER) {
op += MP_BINARY_OP_OR - MP_BINARY_OP_INPLACE_OR;
@@ -2297,9 +2332,13 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG(emit->as, REG_RET);
} else {
ASM_ASR_REG(emit->as, REG_RET);
if (vtype_lhs == VTYPE_UINT) {
ASM_LSR_REG(emit->as, REG_RET);
} else {
ASM_ASR_REG(emit->as, REG_RET);
}
}
emit_post_push_reg(emit, VTYPE_INT, REG_RET);
emit_post_push_reg(emit, vtype_lhs, REG_RET);
return;
}
#endif
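The hunk above is part of the broader 1.17 change that lets viper code distinguish uint from int: a right shift of a uint now emits a logical shift (LSR) instead of an arithmetic one (ASR). A small host-side sketch of the difference follows (illustrative only, not from the commit; note that >> on a negative signed value is implementation-defined in C, though common compilers shift arithmetically).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t u = 0x80000000u;          /* top bit set, treated as unsigned */
    int32_t s = (int32_t)0x80000000u;  /* same bit pattern, treated as signed */
    /* logical shift: a zero is shifted in from the left */
    printf("logical    >> 1: 0x%08" PRIX32 "\n", u >> 1);
    /* arithmetic shift: the sign bit is typically replicated */
    printf("arithmetic >> 1: 0x%08" PRIX32 "\n", (uint32_t)(s >> 1));
    return 0;
}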
@@ -2307,6 +2346,10 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
// special cases for floor-divide and modulo because we dispatch to helper functions
if (op == MP_BINARY_OP_FLOOR_DIVIDE || op == MP_BINARY_OP_MODULO) {
emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
if (vtype_lhs != VTYPE_INT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
MP_ERROR_TEXT("div/mod not implemented for uint"), mp_binary_op_method_name[op]);
}
if (op == MP_BINARY_OP_FLOOR_DIVIDE) {
emit_call(emit, MP_F_SMALL_INT_FLOOR_DIVIDE);
} else {
@@ -2319,33 +2362,41 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
int reg_rhs = REG_ARG_3;
emit_pre_pop_reg_flexible(emit, &vtype_rhs, &reg_rhs, REG_RET, REG_ARG_2);
emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2);

#if !(N_X64 || N_X86)
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
} else if (op == MP_BINARY_OP_RSHIFT) {
ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
} else
if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
} else {
if (vtype_lhs == VTYPE_UINT) {
ASM_LSR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
} else {
ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
}
}
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
return;
}
#endif

if (op == MP_BINARY_OP_OR) {
ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_XOR) {
ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_AND) {
ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_ADD) {
ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_SUBTRACT) {
ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_MULTIPLY) {
ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (MP_BINARY_OP_LESS <= op && op <= MP_BINARY_OP_NOT_EQUAL) {
// comparison ops are (in enum order):
// MP_BINARY_OP_LESS
@@ -2354,11 +2405,26 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
// MP_BINARY_OP_LESS_EQUAL
// MP_BINARY_OP_MORE_EQUAL
// MP_BINARY_OP_NOT_EQUAL

if (vtype_lhs != vtype_rhs) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("comparison of int and uint"));
}

size_t op_idx = op - MP_BINARY_OP_LESS + (vtype_lhs == VTYPE_UINT ? 0 : 6);

need_reg_single(emit, REG_RET, 0);
#if N_X64
asm_x64_xor_r64_r64(emit->as, REG_RET, REG_RET);
asm_x64_cmp_r64_with_r64(emit->as, reg_rhs, REG_ARG_2);
static byte ops[6] = {
static byte ops[6 + 6] = {
// unsigned
ASM_X64_CC_JB,
ASM_X64_CC_JA,
ASM_X64_CC_JE,
ASM_X64_CC_JBE,
ASM_X64_CC_JAE,
ASM_X64_CC_JNE,
// signed
ASM_X64_CC_JL,
ASM_X64_CC_JG,
ASM_X64_CC_JE,
@@ -2366,11 +2432,19 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_X64_CC_JGE,
ASM_X64_CC_JNE,
};
asm_x64_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
asm_x64_setcc_r8(emit->as, ops[op_idx], REG_RET);
#elif N_X86
asm_x86_xor_r32_r32(emit->as, REG_RET, REG_RET);
asm_x86_cmp_r32_with_r32(emit->as, reg_rhs, REG_ARG_2);
static byte ops[6] = {
static byte ops[6 + 6] = {
// unsigned
ASM_X86_CC_JB,
ASM_X86_CC_JA,
ASM_X86_CC_JE,
ASM_X86_CC_JBE,
ASM_X86_CC_JAE,
ASM_X86_CC_JNE,
// signed
ASM_X86_CC_JL,
ASM_X86_CC_JG,
ASM_X86_CC_JE,
@@ -2378,24 +2452,62 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_X86_CC_JGE,
ASM_X86_CC_JNE,
};
asm_x86_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
asm_x86_setcc_r8(emit->as, ops[op_idx], REG_RET);
#elif N_THUMB
asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
static uint16_t ops[6] = {
ASM_THUMB_OP_ITE_GE,
#if MICROPY_EMIT_THUMB_ARMV7M
static uint16_t ops[6 + 6] = {
// unsigned
ASM_THUMB_OP_ITE_CC,
ASM_THUMB_OP_ITE_HI,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_LS,
ASM_THUMB_OP_ITE_CS,
ASM_THUMB_OP_ITE_NE,
// signed
ASM_THUMB_OP_ITE_LT,
ASM_THUMB_OP_ITE_GT,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_GT,
ASM_THUMB_OP_ITE_LE,
ASM_THUMB_OP_ITE_GE,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_NE,
};
static byte ret[6] = { 0, 1, 1, 0, 1, 0, };
asm_thumb_op16(emit->as, ops[op - MP_BINARY_OP_LESS]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS] ^ 1);
asm_thumb_op16(emit->as, ops[op_idx]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
#else
static uint16_t ops[6 + 6] = {
// unsigned
ASM_THUMB_CC_CC,
ASM_THUMB_CC_HI,
ASM_THUMB_CC_EQ,
ASM_THUMB_CC_LS,
ASM_THUMB_CC_CS,
ASM_THUMB_CC_NE,
// signed
ASM_THUMB_CC_LT,
ASM_THUMB_CC_GT,
ASM_THUMB_CC_EQ,
ASM_THUMB_CC_LE,
ASM_THUMB_CC_GE,
ASM_THUMB_CC_NE,
};
asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
asm_thumb_b_rel12(emit->as, 4);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
#endif
#elif N_ARM
asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
static uint ccs[6] = {
static uint ccs[6 + 6] = {
// unsigned
ASM_ARM_CC_CC,
ASM_ARM_CC_HI,
ASM_ARM_CC_EQ,
ASM_ARM_CC_LS,
ASM_ARM_CC_CS,
ASM_ARM_CC_NE,
// signed
ASM_ARM_CC_LT,
ASM_ARM_CC_GT,
ASM_ARM_CC_EQ,
@@ -2403,9 +2515,17 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_ARM_CC_GE,
ASM_ARM_CC_NE,
};
asm_arm_setcc_reg(emit->as, REG_RET, ccs[op - MP_BINARY_OP_LESS]);
asm_arm_setcc_reg(emit->as, REG_RET, ccs[op_idx]);
#elif N_XTENSA || N_XTENSAWIN
static uint8_t ccs[6] = {
static uint8_t ccs[6 + 6] = {
// unsigned
ASM_XTENSA_CC_LTU,
0x80 | ASM_XTENSA_CC_LTU, // for GTU we'll swap args
ASM_XTENSA_CC_EQ,
0x80 | ASM_XTENSA_CC_GEU, // for LEU we'll swap args
ASM_XTENSA_CC_GEU,
ASM_XTENSA_CC_NE,
// signed
ASM_XTENSA_CC_LT,
0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args
ASM_XTENSA_CC_EQ,
@@ -2413,21 +2533,21 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_XTENSA_CC_GE,
ASM_XTENSA_CC_NE,
};
uint8_t cc = ccs[op - MP_BINARY_OP_LESS];
uint8_t cc = ccs[op_idx];
if ((cc & 0x80) == 0) {
asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs);
} else {
asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2);
}
#else
#error not implemented
#error not implemented
#endif
emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
} else {
// TODO other ops not yet implemented
adjust_stack(emit, 1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"binary op %q not implemented", mp_binary_op_method_name[op]);
MP_ERROR_TEXT("binary op %q not implemented"), mp_binary_op_method_name[op]);
}
} else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_3, &vtype_lhs, REG_ARG_2);
@@ -2448,7 +2568,7 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
} else {
adjust_stack(emit, -1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't do binary op between '%q' and '%q'",
MP_ERROR_TEXT("can't do binary op between '%q' and '%q'"),
vtype_to_qstr(vtype_lhs), vtype_to_qstr(vtype_rhs));
}
}
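The comparison changes above all follow one pattern: each back end's condition-code table is doubled to six unsigned plus six signed entries, and op_idx picks the half based on the operand type. A small sketch of that indexing follows (hypothetical names and strings, not the commit's code; the unsigned/signed mnemonics loosely mirror the x64 table).

#include <stddef.h>
#include <stdio.h>

/* Comparison ops in the same relative order as the diff's enum comment. */
enum { OP_LESS, OP_MORE, OP_EQUAL, OP_LESS_EQUAL, OP_MORE_EQUAL, OP_NOT_EQUAL };

/* Hypothetical condition-code table: 6 unsigned entries followed by 6 signed
 * ones, mirroring the ops[6 + 6] tables added in the diff. */
static const char *cc[6 + 6] = {
    "B", "A", "E", "BE", "AE", "NE",   /* unsigned */
    "L", "G", "E", "LE", "GE", "NE",   /* signed */
};

static const char *pick_cc(int op, int lhs_is_unsigned) {
    /* unsigned comparisons use the first half of the table, signed the second */
    size_t op_idx = (size_t)op + (lhs_is_unsigned ? 0 : 6);
    return cc[op_idx];
}

int main(void) {
    printf("uint < -> J%s\n", pick_cc(OP_LESS, 1)); /* JB */
    printf("int  < -> J%s\n", pick_cc(OP_LESS, 0)); /* JL */
    return 0;
}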
@@ -2626,7 +2746,7 @@ STATIC void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_u
break;
default:
// this can happen when casting a cast: int(int)
mp_raise_NotImplementedError("casting");
mp_raise_NotImplementedError(MP_ERROR_TEXT("casting"));
}
} else {
assert(vtype_fun == VTYPE_PYOBJ);
@@ -2690,7 +2810,7 @@ STATIC void emit_native_return_value(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype, return_vtype == VTYPE_PYOBJ ? REG_PARENT_RET : REG_ARG_1);
if (vtype != return_vtype) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"return expected '%q' but got '%q'",
MP_ERROR_TEXT("return expected '%q' but got '%q'"),
vtype_to_qstr(return_vtype), vtype_to_qstr(vtype));
}
}
@@ -2719,7 +2839,7 @@ STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) {
vtype_kind_t vtype_exc;
emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
if (vtype_exc != VTYPE_PYOBJ) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "must raise an object");
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("must raise an object"));
}
// TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
emit_call(emit, MP_F_NATIVE_RAISE);
@@ -2729,7 +2849,7 @@ STATIC void emit_native_yield(emit_t *emit, int kind) {
// Note: 1 (yield) or 3 (yield from) labels are reserved for this function, starting at *emit->label_slot

if (emit->do_viper_types) {
mp_raise_NotImplementedError("native yield");
mp_raise_NotImplementedError(MP_ERROR_TEXT("native yield"));
}
emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;

@@ -2775,6 +2895,7 @@ STATIC void emit_native_yield(emit_t *emit, int kind) {
// Found active handler, get its PC
ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
break;
}
}
}