Update MicroPython from 1.12 to 1.17

Yaya.Cout
2021-12-14 18:16:49 +01:00
parent 5cbce3c116
commit 38faecda29
162 changed files with 8326 additions and 3888 deletions

View File

@@ -69,7 +69,8 @@ static inline KDColor TokenColor(mp_token_kind_t tokenKind) {
&& MP_TOKEN_KW_TRY + 1 == MP_TOKEN_KW_WHILE
&& MP_TOKEN_KW_WHILE + 1 == MP_TOKEN_KW_WITH
&& MP_TOKEN_KW_WITH + 1 == MP_TOKEN_KW_YIELD
&& MP_TOKEN_KW_YIELD + 1 == MP_TOKEN_OP_TILDE,
&& MP_TOKEN_KW_YIELD + 1 == MP_TOKEN_OP_ASSIGN
&& MP_TOKEN_OP_ASSIGN + 1 == MP_TOKEN_OP_TILDE,
"MP_TOKEN order changed, so Code::PythonTextArea::TokenColor might need to change too.");
if (tokenKind >= MP_TOKEN_KW_FALSE && tokenKind <= MP_TOKEN_KW_YIELD) {
return KeywordColor;
@@ -122,7 +123,8 @@ static inline KDColor TokenColor(mp_token_kind_t tokenKind) {
if ((tokenKind >= MP_TOKEN_OP_TILDE && tokenKind <= MP_TOKEN_DEL_DBL_STAR_EQUAL)
|| tokenKind == MP_TOKEN_DEL_EQUAL
|| tokenKind == MP_TOKEN_DEL_MINUS_MORE)
|| tokenKind == MP_TOKEN_DEL_MINUS_MORE
|| tokenKind == MP_TOKEN_OP_ASSIGN)
{
return OperatorColor;
}
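
For context, MicroPython 1.17 inserts the walrus-operator token MP_TOKEN_OP_ASSIGN (":=") between MP_TOKEN_KW_YIELD and MP_TOKEN_OP_TILDE, which is why both the static_assert chain and the operator range above gain a case for it. A minimal sketch of the resulting classification (not the actual Epsilon code; KeywordColor, OperatorColor and DefaultColor stand in for the real palette):

static KDColor TokenColorSketch(mp_token_kind_t kind) {
  if (kind >= MP_TOKEN_KW_FALSE && kind <= MP_TOKEN_KW_YIELD) {
    return KeywordColor;   // keywords form one contiguous block of token ids
  }
  if ((kind >= MP_TOKEN_OP_TILDE && kind <= MP_TOKEN_DEL_DBL_STAR_EQUAL)
      || kind == MP_TOKEN_OP_ASSIGN) {   // ':=' sits just before the operator block
    return OperatorColor;
  }
  return DefaultColor;
}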

View File

@@ -14,6 +14,7 @@ py_src = $(addprefix python/src/py/,\
nlrx86.c \
nlrx64.c \
nlrthumb.c \
nlraarch64.c \
nlrpowerpc.c \
nlrxtensa.c \
nlrsetjmp.c \
@@ -55,6 +56,7 @@ py_src = $(addprefix python/src/py/,\
runtime_utils.c \
scheduler.c \
nativeglue.c \
pairheap.c \
ringbuf.c \
stackctrl.c \
argcheck.c \

View File

@@ -35,9 +35,6 @@ Q(<genexpr>)
Q(<string>)
Q(<stdin>)
Q(utf-8)
#if __EMSCRIPTEN__
Q(pystack exhausted)
#endif
Q(ArithmeticError)
Q(AssertionError)
Q(AttributeError)
@@ -49,7 +46,6 @@ Q(GeneratorExit)
Q(ImportError)
Q(IndentationError)
Q(IndexError)
Q(keepends)
Q(KeyError)
Q(KeyboardInterrupt)
Q(LookupError)
@@ -68,88 +64,47 @@ Q(UnicodeError)
Q(ValueError)
Q(ZeroDivisionError)
Q(_0x0a_)
Q(__abs__)
Q(__add__)
Q(__and__)
Q(__bool__)
Q(__build_class__)
Q(__call__)
Q(__class__)
Q(__contains__)
Q(__delitem__)
Q(__divmod__)
Q(__enter__)
Q(__eq__)
Q(__exit__)
Q(__floordiv__)
Q(__ge__)
Q(__getattr__)
Q(__getitem__)
Q(__gt__)
Q(__hash__)
Q(__iadd__)
Q(__iand__)
Q(__ifloordiv__)
Q(__ilshift__)
Q(__imatmul__)
Q(__imod__)
Q(__import__)
Q(__imul__)
Q(__init__)
Q(__int__)
Q(__invert__)
Q(__ior__)
Q(__ipow__)
Q(__irshift__)
Q(__isub__)
Q(__iter__)
Q(__itruediv__)
Q(__ixor__)
Q(__le__)
Q(__len__)
Q(__lshift__)
Q(__lt__)
Q(__main__)
Q(__matmul__)
Q(__mod__)
Q(__module__)
Q(__mul__)
Q(__name__)
#if __EMSCRIPTEN__
Q(__ne__)
#endif
Q(__neg__)
Q(__new__)
Q(__next__)
Q(__or__)
Q(__path__)
Q(__pos__)
Q(__pow__)
Q(__qualname__)
Q(__radd__)
Q(__rand__)
Q(__repl_print__)
Q(__repr__)
Q(__reversed__)
Q(__rfloordiv__)
Q(__rlshift__)
Q(__rmatmul__)
Q(__rmod__)
Q(__rmul__)
Q(__ror__)
Q(__rpow__)
Q(__rrshift__)
Q(__rshift__)
Q(__rsub__)
Q(__rtruediv__)
Q(__rxor__)
Q(__setitem__)
Q(__str__)
Q(__sub__)
Q(__traceback__)
Q(__truediv__)
Q(__xor__)
Q(_brace_open__colon__hash_b_brace_close_)
Q(_lt_dictcomp_gt_)
Q(_lt_genexpr_gt_)
@@ -170,6 +125,7 @@ Q(all)
Q(any)
Q(append)
Q(args)
Q(argv)
Q(asin)
Q(asinh)
Q(atan)
@@ -179,11 +135,12 @@ Q(bin)
Q(bool)
Q(bound_method)
Q(builtins)
Q(bytearray)
Q(bytecode)
Q(byteorder)
Q(bytes)
Q(callable)
Q(ceil)
Q(center)
Q(choice)
Q(chr)
Q(classmethod)
@@ -200,6 +157,7 @@ Q(cosh)
Q(count)
Q(default)
Q(degrees)
Q(deleter)
Q(dict)
Q(dict_view)
Q(difference)
@@ -207,14 +165,17 @@ Q(difference_update)
Q(dir)
Q(discard)
Q(divmod)
Q(doc)
Q(e)
Q(end)
Q(endswith)
Q(enumerate)
Q(erf)
Q(erfc)
Q(errno)
Q(eval)
Q(exec)
Q(exit)
Q(exp)
Q(expm1)
Q(extend)
@@ -235,6 +196,7 @@ Q(generator)
Q(get)
Q(getattr)
Q(getrandbits)
Q(getter)
Q(globals)
Q(hasattr)
Q(hash)
@@ -243,12 +205,14 @@ Q(heap_unlock)
Q(hex)
Q(id)
Q(imag)
Q(implementation)
Q(index)
Q(input)
Q(insert)
Q(int)
Q(intersection)
Q(intersection_update)
Q(ion)
Q(isalpha)
Q(isdigit)
Q(isdisjoint)
@@ -283,18 +247,23 @@ Q(lower)
Q(lstrip)
Q(map)
Q(math)
Q(matplotlib)
Q(matplotlib_dot_pyplot)
Q(max)
Q(maximum_space_recursion_space_depth_space_exceeded)
Q(micropython)
Q(min)
Q(modf)
Q(module)
Q(modules)
Q(next)
Q(object)
Q(oct)
Q(open)
Q(opt_level)
Q(ord)
Q(os)
Q(path)
Q(pend_throw)
Q(phase)
Q(pi)
@@ -303,10 +272,8 @@ Q(pop)
Q(popitem)
Q(pow)
Q(print)
#if __EMSCRIPTEN__
Q(pystack_space_exhausted)
Q(pystack_use)
#endif
Q(print_exception)
Q(property)
Q(radians)
Q(randint)
Q(random)
@@ -330,13 +297,13 @@ Q(sep)
Q(set)
Q(setattr)
Q(setdefault)
Q(setter)
Q(sin)
Q(sinh)
Q(slice)
Q(sort)
Q(sorted)
Q(split)
Q(splitlines)
Q(sqrt)
Q(start)
Q(startswith)
@@ -349,25 +316,27 @@ Q(sum)
Q(super)
Q(symmetric_difference)
Q(symmetric_difference_update)
Q(sys)
Q(tan)
Q(tanh)
Q(throw)
Q(time)
Q(to_bytes)
Q(trunc)
Q(tuple)
Q(turtle)
Q(type)
Q(uniform)
Q(union)
Q(update)
Q(upper)
Q(random)
Q(usys)
Q(value)
Q(values)
Q(version)
Q(version_info)
Q(zip)
Q(doc)
Q(property)
Q(getter)
Q(setter)
Q(deleter)
// Ion QSTR
Q(ion)
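
For context, each Q(...) entry above is collected by MicroPython's qstr generator into an interned-string pool, so C code can use compile-time MP_QSTR_<name> ids and compare integers instead of calling strcmp(). A minimal sketch of what that buys (assuming the standard qstr machinery; sketch_sys_argv is a hypothetical helper):

STATIC mp_obj_t sketch_sys_argv(mp_obj_t sys_module) {
  // 'argv' resolves to the interned id MP_QSTR_argv because Q(argv) is in the pool
  return mp_load_attr(sys_module, MP_QSTR_argv);
}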

View File

@@ -13,7 +13,7 @@ void mp_hal_set_interrupt_char(int c) {
}
void mp_keyboard_interrupt(void) {
MP_STATE_VM(mp_pending_exception) = MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_kbd_exception));
MP_STATE_MAIN_THREAD(mp_pending_exception) = MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_kbd_exception));
}
#endif
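
For context, newer MicroPython moved the pending-exception slot from the VM state into per-thread state, so a keyboard interrupt raised outside the VM loop must target the main thread's slot. The VM then polls that slot roughly as in this sketch (assuming the standard scheduler behaviour; sketch_handle_pending is hypothetical):

void sketch_handle_pending(void) {
  if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
    mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
    MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
    nlr_raise(obj);   // surfaces as KeyboardInterrupt in the running script
  }
}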

View File

@@ -38,40 +38,40 @@ void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig) {
size_t n_args_max = (sig >> 1) & 0xffff;
if (n_kw && !takes_kw) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
mp_raise_TypeError("function doesn't take keyword arguments");
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
mp_raise_TypeError(MP_ERROR_TEXT("function doesn't take keyword arguments"));
#endif
}
if (n_args_min == n_args_max) {
if (n_args != n_args_min) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function takes %d positional arguments but %d were given",
n_args_min, n_args));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function takes %d positional arguments but %d were given"),
n_args_min, n_args);
#endif
}
} else {
if (n_args < n_args_min) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function missing %d required positional arguments",
n_args_min - n_args));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function missing %d required positional arguments"),
n_args_min - n_args);
#endif
} else if (n_args > n_args_max) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function expected at most %d arguments, got %d",
n_args_max, n_args));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function expected at most %d arguments, got %d"),
n_args_max, n_args);
#endif
}
}
}
@@ -90,12 +90,11 @@ void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n
mp_map_elem_t *kw = mp_map_lookup(kws, MP_OBJ_NEW_QSTR(allowed[i].qst), MP_MAP_LOOKUP);
if (kw == NULL) {
if (allowed[i].flags & MP_ARG_REQUIRED) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"'%q' argument required", allowed[i].qst));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
mp_raise_msg_varg(&mp_type_TypeError, MP_ERROR_TEXT("'%q' argument required"), allowed[i].qst);
#endif
}
out_vals[i] = allowed[i].defval;
continue;
@@ -114,21 +113,21 @@ void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n
}
}
if (pos_found < n_pos) {
extra_positional:
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
// TODO better error message
mp_raise_TypeError("extra positional arguments given");
}
extra_positional:
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
// TODO better error message
mp_raise_TypeError(MP_ERROR_TEXT("extra positional arguments given"));
#endif
}
if (kws_found < kws->used) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_arg_error_terse_mismatch();
} else {
// TODO better error message
mp_raise_TypeError("extra keyword arguments given");
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_arg_error_terse_mismatch();
#else
// TODO better error message
mp_raise_TypeError(MP_ERROR_TEXT("extra keyword arguments given"));
#endif
}
}
@@ -139,11 +138,11 @@ void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args,
}
NORETURN void mp_arg_error_terse_mismatch(void) {
mp_raise_TypeError("argument num/types mismatch");
mp_raise_TypeError(MP_ERROR_TEXT("argument num/types mismatch"));
}
#if MICROPY_CPYTHON_COMPAT
NORETURN void mp_arg_error_unimpl_kw(void) {
mp_raise_NotImplementedError("keyword argument(s) not yet implemented - use normal args instead");
mp_raise_NotImplementedError(MP_ERROR_TEXT("keyword argument(s) not yet implemented - use normal args instead"));
}
#endif
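
For context, this file shows the pattern applied throughout the update: the error-reporting level is now selected at preprocessing time, and message literals are wrapped in MP_ERROR_TEXT() so builds with MICROPY_ROM_TEXT_COMPRESSION can store them compressed. A hedged sketch of the idiom (sketch_require_positive is hypothetical):

static void sketch_require_positive(mp_int_t n) {
  if (n <= 0) {
    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
    mp_arg_error_terse_mismatch();   // terse builds drop the message text entirely
    #else
    mp_raise_msg_varg(&mp_type_ValueError,
        MP_ERROR_TEXT("argument must be positive, got %d"), (int)n);
    #endif
  }
}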

View File

@@ -38,30 +38,11 @@
#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
void asm_arm_end_pass(asm_arm_t *as) {
if (as->base.pass == MP_ASM_PASS_EMIT) {
#if defined(__linux__) && defined(__GNUC__)
char *start = mp_asm_base_get_code(&as->base);
char *end = start + mp_asm_base_get_code_size(&as->base);
__builtin___clear_cache(start, end);
#elif defined(__arm__)
// flush I- and D-cache
asm volatile(
"0:"
"mrc p15, 0, r15, c7, c10, 3\n"
"bne 0b\n"
"mov r0, #0\n"
"mcr p15, 0, r0, c7, c7, 0\n"
: : : "r0", "cc");
#endif
}
}
// Insert word into instruction flow
STATIC void emit(asm_arm_t *as, uint op) {
uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
if (c != NULL) {
*(uint32_t*)c = op;
*(uint32_t *)c = op;
}
}
@@ -303,6 +284,11 @@ void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
}
void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
// mov rd, rd, lsr rs
emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
}
void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
// mov rd, rd, asr rs
emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);

View File

@@ -72,7 +72,9 @@ typedef struct _asm_arm_t {
uint stack_adjust;
} asm_arm_t;
void asm_arm_end_pass(asm_arm_t *as);
static inline void asm_arm_end_pass(asm_arm_t *as) {
(void)as;
}
void asm_arm_entry(asm_arm_t *as, int num_locals);
void asm_arm_exit(asm_arm_t *as);
@@ -101,6 +103,7 @@ void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label);
void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs);
void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
// memory
@@ -187,6 +190,7 @@ void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_arm_mov_reg_pcrel((as), (reg_dest), (label))
#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_arm_lsr_reg_reg((as), (reg_dest), (reg_shift))
#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))

View File

@@ -51,7 +51,7 @@ void mp_asm_base_start_pass(mp_asm_base_t *as, int pass) {
memset(as->label_offsets, -1, as->max_num_labels * sizeof(size_t));
} else {
// allocating executable RAM is platform specific
MP_PLAT_ALLOC_EXEC(as->code_offset, (void**)&as->code_base, &as->code_size);
MP_PLAT_ALLOC_EXEC(as->code_offset, (void **)&as->code_base, &as->code_size);
assert(as->code_base != NULL);
}
as->pass = pass;
@@ -84,12 +84,12 @@ void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label) {
}
// align must be a multiple of 2
void mp_asm_base_align(mp_asm_base_t* as, unsigned int align) {
void mp_asm_base_align(mp_asm_base_t *as, unsigned int align) {
as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
}
// this function assumes a little endian machine
void mp_asm_base_data(mp_asm_base_t* as, unsigned int bytesize, uintptr_t val) {
void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val) {
uint8_t *c = mp_asm_base_get_cur_to_write_bytes(as, bytesize);
if (c != NULL) {
for (unsigned int i = 0; i < bytesize; i++) {

View File

@@ -47,8 +47,8 @@ void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code);
void mp_asm_base_start_pass(mp_asm_base_t *as, int pass);
uint8_t *mp_asm_base_get_cur_to_write_bytes(mp_asm_base_t *as, size_t num_bytes_to_write);
void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label);
void mp_asm_base_align(mp_asm_base_t* as, unsigned int align);
void mp_asm_base_data(mp_asm_base_t* as, unsigned int bytesize, uintptr_t val);
void mp_asm_base_align(mp_asm_base_t *as, unsigned int align);
void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val);
static inline size_t mp_asm_base_get_code_pos(mp_asm_base_t *as) {
return as->code_offset;

View File

@@ -35,7 +35,6 @@
#include "py/mpstate.h"
#include "py/persistentcode.h"
#include "py/mphal.h"
#include "py/asmthumb.h"
#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
@@ -47,6 +46,7 @@
#define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
#define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000)
#if MICROPY_EMIT_THUMB_ARMV7M
// Note: these actually take an imm12 but the high-bit is not encoded here
#define OP_ADD_W_RRI_HI(reg_src) (0xf200 | (reg_src))
#define OP_ADD_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
@@ -55,25 +55,12 @@
#define OP_LDR_W_HI(reg_base) (0xf8d0 | (reg_base))
#define OP_LDR_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12))
#endif
static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) {
return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
}
void asm_thumb_end_pass(asm_thumb_t *as) {
(void)as;
// could check labels are resolved...
#if __ICACHE_PRESENT == 1
if (as->base.pass == MP_ASM_PASS_EMIT) {
// flush D-cache, so the code emitted is stored in memory
MP_HAL_CLEAN_DCACHE(as->base.code_base, as->base.code_size);
// invalidate I-cache
SCB_InvalidateICache();
}
#endif
}
/*
STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
@@ -122,7 +109,7 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
// If this Thumb machine code is run from ARM state then add a prelude
// to switch to Thumb state for the duration of the function.
#if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__))
#if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__) && !defined(__thumb__))
#if MICROPY_DYNAMIC_COMPILER
if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_ARMV6)
#endif
@@ -171,11 +158,21 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
}
asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
if (stack_adjust > 0) {
#if MICROPY_EMIT_THUMB_ARMV7M
if (UNSIGNED_FIT7(stack_adjust)) {
asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
} else {
asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4));
}
#else
int adj = stack_adjust;
// we don't expect the stack_adjust to be massive
while (!UNSIGNED_FIT7(adj)) {
asm_thumb_op16(as, OP_SUB_SP(127));
adj -= 127;
}
asm_thumb_op16(as, OP_SUB_SP(adj));
#endif
}
as->push_reglist = reglist;
as->stack_adjust = stack_adjust;
@@ -183,11 +180,21 @@ void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
void asm_thumb_exit(asm_thumb_t *as) {
if (as->stack_adjust > 0) {
#if MICROPY_EMIT_THUMB_ARMV7M
if (UNSIGNED_FIT7(as->stack_adjust)) {
asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
} else {
asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4));
}
#else
int adj = as->stack_adjust;
// we don't expect the stack_adjust to be massive
while (!UNSIGNED_FIT7(adj)) {
asm_thumb_op16(as, OP_ADD_SP(127));
adj -= 127;
}
asm_thumb_op16(as, OP_ADD_SP(adj));
#endif
}
asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
}
@@ -241,6 +248,8 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
asm_thumb_op16(as, 0x4600 | op_lo);
}
#if MICROPY_EMIT_THUMB_ARMV7M
// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
assert(reg_dest < ASM_THUMB_REG_R15);
@@ -250,6 +259,16 @@ size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i1
return loc;
}
#else
void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src) {
asm_thumb_mov_rlo_i8(as, rlo_dest, (i16_src >> 8) & 0xff);
asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, 8);
asm_thumb_add_rlo_i8(as, rlo_dest, i16_src & 0xff);
}
#endif
#define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
@@ -274,8 +293,13 @@ bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
asm_thumb_op16(as, OP_BCC_N(cond, rel));
return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel);
} else {
#if MICROPY_EMIT_THUMB_ARMV7M
asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
return true;
#else
// this method should not be called for ARMV6M
return false;
#endif
}
}
@@ -296,8 +320,30 @@ size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
size_t loc = mp_asm_base_get_code_pos(&as->base);
#if MICROPY_EMIT_THUMB_ARMV7M
asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16);
#else
// should only be called with lo reg for ARMV6M
assert(reg_dest < ASM_THUMB_REG_R8);
// sanity check that generated code is aligned
assert(!as->base.code_base || !(3u & (uintptr_t)as->base.code_base));
// basically:
// (nop)
// ldr reg_dest, _data
// b 1f
// _data: .word i32
// 1:
if (as->base.code_offset & 2u) {
asm_thumb_op16(as, ASM_THUMB_OP_NOP);
}
asm_thumb_ldr_rlo_pcrel_i8(as, reg_dest, 0);
asm_thumb_op16(as, OP_B_N(2));
asm_thumb_op16(as, i32 & 0xffff);
asm_thumb_op16(as, i32 >> 16);
#endif
return loc;
}
@@ -305,27 +351,68 @@ size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
asm_thumb_mov_rlo_i8(as, reg_dest, i32);
} else if (UNSIGNED_FIT16(i32)) {
asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
} else {
asm_thumb_mov_reg_i32(as, reg_dest, i32);
#if MICROPY_EMIT_THUMB_ARMV7M
if (UNSIGNED_FIT16(i32)) {
asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
} else {
asm_thumb_mov_reg_i32(as, reg_dest, i32);
}
#else
uint rlo_dest = reg_dest;
assert(rlo_dest < ASM_THUMB_REG_R8); // should never be called for ARMV6M
bool negate = i32 < 0 && ((i32 + i32) & 0xffffffffu); // don't negate 0x80000000
if (negate) {
i32 = -i32;
}
uint clz = __builtin_clz(i32);
uint ctz = i32 ? __builtin_ctz(i32) : 0;
assert(clz + ctz <= 32);
if (clz + ctz >= 24) {
asm_thumb_mov_rlo_i8(as, rlo_dest, (i32 >> ctz) & 0xff);
asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, ctz);
} else if (UNSIGNED_FIT16(i32)) {
asm_thumb_mov_rlo_i16(as, rlo_dest, i32);
} else {
if (negate) {
// no point in negating if we're storing in 32 bit anyway
negate = false;
i32 = -i32;
}
asm_thumb_mov_reg_i32(as, rlo_dest, i32);
}
if (negate) {
asm_thumb_neg_rlo_rlo(as, rlo_dest, rlo_dest);
}
#endif
}
}
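
For context, the new ARMv6-M branch of asm_thumb_mov_reg_i32_optimised avoids a 6-byte literal-pool load whenever the constant's significant bits span at most 8 positions. A worked sketch of that test (assuming GCC's __builtin_clz/__builtin_ctz, as the code above already does):

static bool sketch_fits_movs_lsls(uint32_t i32) {
  if (i32 == 0) {
    return true;                         // movs rX, #0 is enough
  }
  unsigned int clz = __builtin_clz(i32); // e.g. 0x3F000000 -> clz = 2
  unsigned int ctz = __builtin_ctz(i32); //                    ctz = 24
  return clz + ctz >= 24;                // 2 + 24 = 26 >= 24: emit "movs; lsls" instead
}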
#define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
#define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
static void asm_thumb_mov_local_check(asm_thumb_t *as, int word_offset) {
if (as->base.pass >= MP_ASM_PASS_EMIT) {
assert(word_offset >= 0);
if (!UNSIGNED_FIT8(word_offset)) {
mp_raise_NotImplementedError(MP_ERROR_TEXT("too many locals for native method"));
}
}
}
void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
assert(rlo_src < ASM_THUMB_REG_R8);
int word_offset = local_num;
assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
asm_thumb_mov_local_check(as, word_offset);
asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
}
void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
assert(rlo_dest < ASM_THUMB_REG_R8);
int word_offset = local_num;
assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
asm_thumb_mov_local_check(as, word_offset);
asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
}
@@ -341,21 +428,63 @@ void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num)
void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label) {
mp_uint_t dest = get_label_dest(as, label);
mp_int_t rel = dest - as->base.code_offset;
rel -= 4 + 4; // adjust for mov_reg_i16 and then PC+4 prefetch of add_reg_reg
rel |= 1; // to stay in Thumb state when jumping to this address
#if MICROPY_EMIT_THUMB_ARMV7M
rel -= 4 + 4; // adjust for mov_reg_i16 and then PC+4 prefetch of add_reg_reg
asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes
#else
rel -= 8 + 4; // adjust for four instructions and then PC+4 prefetch of add_reg_reg
// 6 bytes
asm_thumb_mov_rlo_i16(as, rlo_dest, rel);
// 2 bytes - not always needed, but we want to keep the size the same
asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest);
#endif
asm_thumb_add_reg_reg(as, rlo_dest, ASM_THUMB_REG_R15); // 2 bytes
}
#if MICROPY_EMIT_THUMB_ARMV7M
static inline void asm_thumb_ldr_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
asm_thumb_op32(as, OP_LDR_W_HI(reg_base), OP_LDR_W_LO(reg_dest, word_offset * 4));
}
#endif
void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) {
if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(word_offset)) {
asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_base, word_offset);
} else {
#if MICROPY_EMIT_THUMB_ARMV7M
asm_thumb_ldr_reg_reg_i12(as, reg_dest, reg_base, word_offset);
#else
word_offset -= 31;
if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8) {
if (UNSIGNED_FIT8(word_offset) && (word_offset < 64 || reg_dest != reg_base)) {
if (word_offset < 64) {
if (reg_dest != reg_base) {
asm_thumb_mov_reg_reg(as, reg_dest, reg_base);
}
asm_thumb_add_rlo_i8(as, reg_dest, word_offset * 4);
} else {
asm_thumb_mov_rlo_i8(as, reg_dest, word_offset);
asm_thumb_lsl_rlo_rlo_i5(as, reg_dest, reg_dest, 2);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
}
} else {
if (reg_dest != reg_base) {
asm_thumb_mov_rlo_i16(as, reg_dest, word_offset * 4);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_dest);
} else {
uint reg_other = reg_dest ^ 7;
asm_thumb_op16(as, OP_PUSH_RLIST((1 << reg_other)));
asm_thumb_mov_rlo_i16(as, reg_other, word_offset * 4);
asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_other);
asm_thumb_op16(as, OP_POP_RLIST((1 << reg_other)));
}
}
} else {
assert(0); // should never be called for ARMV6M
}
asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_dest, 31);
#endif
}
}
@@ -377,8 +506,21 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
#if MICROPY_EMIT_THUMB_ARMV7M
asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
#else
if (SIGNED_FIT12(rel)) {
// this code path has to be the same number of instructions irrespective of rel
asm_thumb_op16(as, OP_B_N(rel));
} else {
asm_thumb_op16(as, ASM_THUMB_OP_NOP);
if (dest != (mp_uint_t)-1) {
// we have an actual branch > 12 bits; this is not handled yet
mp_raise_NotImplementedError(MP_ERROR_TEXT("native method too big"));
}
}
#endif
}
}
@@ -396,11 +538,29 @@ void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
#if MICROPY_EMIT_THUMB_ARMV7M
asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
#else
// reverse the sense of the branch to jump over a longer branch
asm_thumb_op16(as, OP_BCC_N(cond ^ 1, 0));
asm_thumb_b_label(as, label);
#endif
}
}
void asm_thumb_bcc_rel9(asm_thumb_t *as, int cond, int rel) {
rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
assert(SIGNED_FIT9(rel));
asm_thumb_op16(as, OP_BCC_N(cond, rel));
}
void asm_thumb_b_rel12(asm_thumb_t *as, int rel) {
rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
assert(SIGNED_FIT12(rel));
asm_thumb_op16(as, OP_B_N(rel));
}
#define OP_BLX(reg) (0x4780 | ((reg) << 3))
#define OP_SVC(arg) (0xdf00 | (arg))

View File

@@ -70,7 +70,9 @@ typedef struct _asm_thumb_t {
uint32_t stack_adjust;
} asm_thumb_t;
void asm_thumb_end_pass(asm_thumb_t *as);
static inline void asm_thumb_end_pass(asm_thumb_t *as) {
(void)as;
}
void asm_thumb_entry(asm_thumb_t *as, int num_locals);
void asm_thumb_exit(asm_thumb_t *as);
@@ -80,12 +82,19 @@ void asm_thumb_exit(asm_thumb_t *as);
#define ASM_THUMB_OP_IT (0xbf00)
#define ASM_THUMB_OP_ITE_EQ (0xbf0c)
#define ASM_THUMB_OP_ITE_NE (0xbf14)
#define ASM_THUMB_OP_ITE_CS (0xbf2c)
#define ASM_THUMB_OP_ITE_CC (0xbf34)
#define ASM_THUMB_OP_ITE_MI (0xbf4c)
#define ASM_THUMB_OP_ITE_PL (0xbf54)
#define ASM_THUMB_OP_ITE_VS (0xbf6c)
#define ASM_THUMB_OP_ITE_VC (0xbf74)
#define ASM_THUMB_OP_ITE_HI (0xbf8c)
#define ASM_THUMB_OP_ITE_LS (0xbf94)
#define ASM_THUMB_OP_ITE_GE (0xbfac)
#define ASM_THUMB_OP_ITE_LT (0xbfb4)
#define ASM_THUMB_OP_ITE_GT (0xbfcc)
#define ASM_THUMB_OP_ITE_LE (0xbfd4)
#define ASM_THUMB_OP_NOP (0xbf00)
#define ASM_THUMB_OP_WFI (0xbf30)
@@ -95,8 +104,9 @@ void asm_thumb_exit(asm_thumb_t *as);
void asm_thumb_op16(asm_thumb_t *as, uint op);
void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2);
static inline void asm_thumb_it_cc(asm_thumb_t *as, uint cc, uint mask)
{ asm_thumb_op16(as, ASM_THUMB_OP_IT | (cc << 4) | mask); }
static inline void asm_thumb_it_cc(asm_thumb_t *as, uint cc, uint mask) {
asm_thumb_op16(as, ASM_THUMB_OP_IT | (cc << 4) | mask);
}
// FORMAT 1: move shifted register
@@ -129,14 +139,18 @@ static inline void asm_thumb_format_2(asm_thumb_t *as, uint op, uint rlo_dest, u
asm_thumb_op16(as, ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b));
}
static inline void asm_thumb_add_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b)
{ asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b); }
static inline void asm_thumb_add_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src)
{ asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src); }
static inline void asm_thumb_sub_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b)
{ asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b); }
static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src)
{ asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src); }
static inline void asm_thumb_add_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
}
static inline void asm_thumb_add_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
}
static inline void asm_thumb_sub_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
}
static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
}
// FORMAT 3: move/compare/add/subtract immediate
// These instructions all do zero extension of the i8 value
@@ -145,6 +159,7 @@ static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint
#define ASM_THUMB_FORMAT_3_CMP (0x2800)
#define ASM_THUMB_FORMAT_3_ADD (0x3000)
#define ASM_THUMB_FORMAT_3_SUB (0x3800)
#define ASM_THUMB_FORMAT_3_LDR (0x4800)
#define ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8) ((op) | ((rlo) << 8) | (i8))
@@ -153,10 +168,21 @@ static inline void asm_thumb_format_3(asm_thumb_t *as, uint op, uint rlo, int i8
asm_thumb_op16(as, ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8));
}
static inline void asm_thumb_mov_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_MOV, rlo, i8); }
static inline void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_CMP, rlo, i8); }
static inline void asm_thumb_add_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_ADD, rlo, i8); }
static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_SUB, rlo, i8); }
static inline void asm_thumb_mov_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_MOV, rlo, i8);
}
static inline void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_CMP, rlo, i8);
}
static inline void asm_thumb_add_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_ADD, rlo, i8);
}
static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_SUB, rlo, i8);
}
static inline void asm_thumb_ldr_rlo_pcrel_i8(asm_thumb_t *as, uint rlo, uint i8) {
asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_LDR, rlo, i8);
}
// FORMAT 4: ALU operations
@@ -179,7 +205,15 @@ static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) { asm
void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src);
static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) { asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src); }
static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src);
}
static inline void asm_thumb_mvn_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_MVN, rlo_dest, rlo_src);
}
static inline void asm_thumb_neg_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_NEG, rlo_dest, rlo_src);
}
// FORMAT 5: hi register operations (add, cmp, mov, bx)
// For add/cmp/mov, at least one of the args must be a high register
@@ -219,21 +253,54 @@ static inline void asm_thumb_bx_reg(asm_thumb_t *as, uint r_src) {
#define ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset) \
((op) | (((offset) << 6) & 0x07c0) | ((rlo_base) << 3) | (rlo_dest))
static inline void asm_thumb_format_9_10(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint offset)
{ asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset)); }
static inline void asm_thumb_format_9_10(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint offset) {
asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset));
}
static inline void asm_thumb_str_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint word_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_src, rlo_base, word_offset); }
static inline void asm_thumb_strb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_src, rlo_base, byte_offset); }
static inline void asm_thumb_strh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_STRH, rlo_src, rlo_base, byte_offset); }
static inline void asm_thumb_ldr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint word_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_dest, rlo_base, word_offset); }
static inline void asm_thumb_ldrb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER , rlo_dest, rlo_base, byte_offset); }
static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset)
{ asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_LDRH, rlo_dest, rlo_base, byte_offset); }
static inline void asm_thumb_str_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint word_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_src, rlo_base, word_offset);
}
static inline void asm_thumb_strb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_src, rlo_base, byte_offset);
}
static inline void asm_thumb_strh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_STRH, rlo_src, rlo_base, byte_offset);
}
static inline void asm_thumb_ldr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint word_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_dest, rlo_base, word_offset);
}
static inline void asm_thumb_ldrb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_dest, rlo_base, byte_offset);
}
static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset) {
asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_LDRH, rlo_dest, rlo_base, byte_offset);
}
static inline void asm_thumb_lsl_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_LSL, rlo_dest, rlo_src, shift);
}
static inline void asm_thumb_asr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_ASR, rlo_dest, rlo_src, shift);
}
// FORMAT 11: sign/zero extend
#define ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src) \
((op) | ((rlo_src) << 3) | (rlo_dest))
#define ASM_THUMB_FORMAT_11_SXTH (0xb200)
#define ASM_THUMB_FORMAT_11_SXTB (0xb240)
#define ASM_THUMB_FORMAT_11_UXTH (0xb280)
#define ASM_THUMB_FORMAT_11_UXTB (0xb2c0)
static inline void asm_thumb_format_11(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
assert(rlo_dest < ASM_THUMB_REG_R8);
assert(rlo_src < ASM_THUMB_REG_R8);
asm_thumb_op16(as, ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src));
}
static inline void asm_thumb_sxth_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
asm_thumb_format_11(as, ASM_THUMB_FORMAT_11_SXTH, rlo_dest, rlo_src);
}
// TODO convert these to above format style
@@ -241,7 +308,12 @@ static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uin
#define ASM_THUMB_OP_MOVT (0xf2c0)
void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
#if MICROPY_EMIT_THUMB_ARMV7M
size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
#else
void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src);
#endif
// these return true if the destination is in range, false otherwise
bool asm_thumb_b_n_label(asm_thumb_t *as, uint label);
@@ -260,6 +332,8 @@ void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint re
void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch
void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch
void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenience
void asm_thumb_bcc_rel9(asm_thumb_t *as, int cc, int rel);
void asm_thumb_b_rel12(asm_thumb_t *as, int rel);
// Holds a pointer to mp_fun_table
#define ASM_THUMB_REG_FUN_TABLE ASM_THUMB_REG_R7
@@ -315,7 +389,11 @@ void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenien
#define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg))
#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm))
#if MICROPY_EMIT_THUMB_ARMV7M
#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_reg_i16((as), ASM_THUMB_OP_MOVW, (reg_dest), (imm))
#else
#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_rlo_i16((as), (reg_dest), (imm))
#endif
#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_thumb_mov_reg_i32((as), (reg_dest), (imm))
#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num))
#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
@@ -323,6 +401,7 @@ void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenien
#define ASM_MOV_REG_PCREL(as, rlo_dest, label) asm_thumb_mov_reg_pcrel((as), (rlo_dest), (label))
#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSR, (reg_dest), (reg_shift))
#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))

View File

@@ -63,15 +63,16 @@
#define OPCODE_SUB_R64_FROM_RM64 (0x29)
#define OPCODE_SUB_I32_FROM_RM64 (0x81) /* /5 */
#define OPCODE_SUB_I8_FROM_RM64 (0x83) /* /5 */
//#define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
//#define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
//#define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
// #define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
// #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
// #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
#define OPCODE_SHL_RM64_CL (0xd3) /* /4 */
#define OPCODE_SHR_RM64_CL (0xd3) /* /5 */
#define OPCODE_SAR_RM64_CL (0xd3) /* /7 */
//#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
//#define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
// #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
#define OPCODE_CMP_R64_WITH_RM64 (0x39) /* /r */
//#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
#define OPCODE_TEST_R64_WITH_RM64 (0x85) /* /r */
#define OPCODE_JMP_REL8 (0xeb)
@@ -123,14 +124,14 @@ static inline byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int n) {
}
STATIC void asm_x64_write_byte_1(asm_x64_t *as, byte b1) {
byte* c = asm_x64_get_cur_to_write_bytes(as, 1);
byte *c = asm_x64_get_cur_to_write_bytes(as, 1);
if (c != NULL) {
c[0] = b1;
}
}
STATIC void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) {
byte* c = asm_x64_get_cur_to_write_bytes(as, 2);
byte *c = asm_x64_get_cur_to_write_bytes(as, 2);
if (c != NULL) {
c[0] = b1;
c[1] = b2;
@@ -138,7 +139,7 @@ STATIC void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) {
}
STATIC void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) {
byte* c = asm_x64_get_cur_to_write_bytes(as, 3);
byte *c = asm_x64_get_cur_to_write_bytes(as, 3);
if (c != NULL) {
c[0] = b1;
c[1] = b2;
@@ -147,7 +148,7 @@ STATIC void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) {
}
STATIC void asm_x64_write_word32(asm_x64_t *as, int w32) {
byte* c = asm_x64_get_cur_to_write_bytes(as, 4);
byte *c = asm_x64_get_cur_to_write_bytes(as, 4);
if (c != NULL) {
c[0] = IMM32_L0(w32);
c[1] = IMM32_L1(w32);
@@ -157,7 +158,7 @@ STATIC void asm_x64_write_word32(asm_x64_t *as, int w32) {
}
STATIC void asm_x64_write_word64(asm_x64_t *as, int64_t w64) {
byte* c = asm_x64_get_cur_to_write_bytes(as, 8);
byte *c = asm_x64_get_cur_to_write_bytes(as, 8);
if (c != NULL) {
c[0] = IMM32_L0(w64);
c[1] = IMM32_L1(w64);
@@ -284,31 +285,28 @@ void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest
}
void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
assert(src_r64 < 8);
if (dest_r64 < 8) {
if (src_r64 < 8 && dest_r64 < 8) {
asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
} else {
asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM8_TO_R64);
}
asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
}
void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
assert(src_r64 < 8);
if (dest_r64 < 8) {
if (src_r64 < 8 && dest_r64 < 8) {
asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
} else {
asm_x64_write_byte_3(as, REX_PREFIX | REX_R, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM16_TO_R64);
}
asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
}
void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
assert(src_r64 < 8);
if (dest_r64 < 8) {
if (src_r64 < 8 && dest_r64 < 8) {
asm_x64_write_byte_1(as, OPCODE_MOV_RM64_TO_R64);
} else {
asm_x64_write_byte_2(as, REX_PREFIX | REX_R, OPCODE_MOV_RM64_TO_R64);
asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
}
asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
}
@@ -378,11 +376,15 @@ void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
}
void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64) {
void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64) {
asm_x64_generic_r64_r64(as, dest_r64, 4, OPCODE_SHL_RM64_CL);
}
void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64) {
void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64) {
asm_x64_generic_r64_r64(as, dest_r64, 5, OPCODE_SHR_RM64_CL);
}
void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64) {
asm_x64_generic_r64_r64(as, dest_r64, 7, OPCODE_SAR_RM64_CL);
}
@@ -500,7 +502,7 @@ void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
rel -= 5;
asm_x64_write_byte_1(as, OPCODE_JMP_REL32);
asm_x64_write_word32(as, rel);
@@ -522,7 +524,7 @@ void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
rel -= 6;
asm_x64_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
asm_x64_write_word32(as, rel);

View File

@@ -61,10 +61,13 @@
// condition codes, used for jcc and setcc (despite their j-name!)
#define ASM_X64_CC_JB (0x2) // below, unsigned
#define ASM_X64_CC_JAE (0x3) // above or equal, unsigned
#define ASM_X64_CC_JZ (0x4)
#define ASM_X64_CC_JE (0x4)
#define ASM_X64_CC_JNZ (0x5)
#define ASM_X64_CC_JNE (0x5)
#define ASM_X64_CC_JBE (0x6) // below or equal, unsigned
#define ASM_X64_CC_JA (0x7) // above, unsigned
#define ASM_X64_CC_JL (0xc) // less, signed
#define ASM_X64_CC_JGE (0xd) // greater or equal, signed
#define ASM_X64_CC_JLE (0xe) // less or equal, signed
@@ -79,12 +82,12 @@ static inline void asm_x64_end_pass(asm_x64_t *as) {
(void)as;
}
void asm_x64_nop(asm_x64_t* as);
void asm_x64_push_r64(asm_x64_t* as, int src_r64);
void asm_x64_pop_r64(asm_x64_t* as, int dest_r64);
void asm_x64_mov_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
void asm_x64_nop(asm_x64_t *as);
void asm_x64_push_r64(asm_x64_t *as, int src_r64);
void asm_x64_pop_r64(asm_x64_t *as, int dest_r64);
void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64);
void asm_x64_mov_i64_to_r64(asm_x64_t* as, int64_t src_i64, int dest_r64);
void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64);
void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
@@ -97,25 +100,26 @@ void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest
void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64);
void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64);
void asm_x64_add_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
void asm_x64_sub_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
void asm_x64_mul_r64_r64(asm_x64_t* as, int dest_r64, int src_r64);
void asm_x64_cmp_r64_with_r64(asm_x64_t* as, int src_r64_a, int src_r64_b);
void asm_x64_test_r8_with_r8(asm_x64_t* as, int src_r64_a, int src_r64_b);
void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64);
void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
void asm_x64_setcc_r8(asm_x64_t* as, int jcc_type, int dest_r8);
void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8);
void asm_x64_jmp_reg(asm_x64_t *as, int src_r64);
void asm_x64_jmp_label(asm_x64_t* as, mp_uint_t label);
void asm_x64_jcc_label(asm_x64_t* as, int jcc_type, mp_uint_t label);
void asm_x64_entry(asm_x64_t* as, int num_locals);
void asm_x64_exit(asm_x64_t* as);
void asm_x64_mov_local_to_r64(asm_x64_t* as, int src_local_num, int dest_r64);
void asm_x64_mov_r64_to_local(asm_x64_t* as, int src_r64, int dest_local_num);
void asm_x64_mov_local_addr_to_r64(asm_x64_t* as, int local_num, int dest_r64);
void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label);
void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label);
void asm_x64_entry(asm_x64_t *as, int num_locals);
void asm_x64_exit(asm_x64_t *as);
void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64);
void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num);
void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64);
void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label);
void asm_x64_call_ind(asm_x64_t* as, size_t fun_id, int temp_r32);
void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);
// Holds a pointer to mp_fun_table
#define ASM_X64_REG_FUN_TABLE ASM_X64_REG_RBP
@@ -190,6 +194,7 @@ void asm_x64_call_ind(asm_x64_t* as, size_t fun_id, int temp_r32);
#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label))
#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
#define ASM_LSR_REG(as, reg) asm_x64_shr_r64_cl((as), (reg))
#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))

View File

@@ -41,13 +41,13 @@
#define OPCODE_NOP (0x90)
#define OPCODE_PUSH_R32 (0x50)
//#define OPCODE_PUSH_I32 (0x68)
//#define OPCODE_PUSH_M32 (0xff) /* /6 */
// #define OPCODE_PUSH_I32 (0x68)
// #define OPCODE_PUSH_M32 (0xff) /* /6 */
#define OPCODE_POP_R32 (0x58)
#define OPCODE_RET (0xc3)
//#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
// #define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
#define OPCODE_MOV_I32_TO_R32 (0xb8)
//#define OPCODE_MOV_I32_TO_RM32 (0xc7)
// #define OPCODE_MOV_I32_TO_RM32 (0xc7)
#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
#define OPCODE_MOV_R32_TO_RM32 (0x89) /* /r */
#define OPCODE_MOV_RM32_TO_R32 (0x8b) /* /r */
@@ -63,15 +63,16 @@
#define OPCODE_SUB_R32_FROM_RM32 (0x29)
#define OPCODE_SUB_I32_FROM_RM32 (0x81) /* /5 */
#define OPCODE_SUB_I8_FROM_RM32 (0x83) /* /5 */
//#define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
//#define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
//#define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
// #define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
// #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
// #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
#define OPCODE_SHL_RM32_CL (0xd3) /* /4 */
#define OPCODE_SHR_RM32_CL (0xd3) /* /5 */
#define OPCODE_SAR_RM32_CL (0xd3) /* /7 */
//#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
//#define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
// #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
#define OPCODE_CMP_R32_WITH_RM32 (0x39)
//#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
#define OPCODE_TEST_R32_WITH_RM32 (0x85) /* /r */
#define OPCODE_JMP_REL8 (0xeb)
@@ -103,14 +104,14 @@
#define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
STATIC void asm_x86_write_byte_1(asm_x86_t *as, byte b1) {
byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 1);
byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 1);
if (c != NULL) {
c[0] = b1;
}
}
STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
if (c != NULL) {
c[0] = b1;
c[1] = b2;
@@ -118,7 +119,7 @@ STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
}
STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
if (c != NULL) {
c[0] = b1;
c[1] = b2;
@@ -127,7 +128,7 @@ STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
}
STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) {
byte* c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
if (c != NULL) {
c[0] = IMM32_L0(w32);
c[1] = IMM32_L1(w32);
@@ -255,11 +256,15 @@ void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
}
void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32) {
void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32) {
asm_x86_generic_r32_r32(as, dest_r32, 4, OPCODE_SHL_RM32_CL);
}
void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32) {
void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32) {
asm_x86_generic_r32_r32(as, dest_r32, 5, OPCODE_SHR_RM32_CL);
}
void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32) {
asm_x86_generic_r32_r32(as, dest_r32, 7, OPCODE_SAR_RM32_CL);
}
@@ -368,7 +373,7 @@ void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
rel -= 5;
asm_x86_write_byte_1(as, OPCODE_JMP_REL32);
asm_x86_write_word32(as, rel);
@@ -390,7 +395,7 @@ void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
}
} else {
// is a forwards jump, so need to assume it's large
large_jump:
large_jump:
rel -= 6;
asm_x86_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
asm_x86_write_word32(as, rel);
@@ -403,7 +408,7 @@ void asm_x86_entry(asm_x86_t *as, int num_locals) {
asm_x86_push_r32(as, ASM_X86_REG_EBX);
asm_x86_push_r32(as, ASM_X86_REG_ESI);
asm_x86_push_r32(as, ASM_X86_REG_EDI);
num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
num_locals |= 3; // make it odd so stack is aligned on 16 byte boundary
asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE);
as->num_locals = num_locals;
}
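
For context, the change from num_locals |= 1 to num_locals |= 3 keeps ESP aligned to 16 bytes rather than 8. A sketch of the arithmetic (assuming cdecl x86, WORD_SIZE == 4, and a caller that aligned the stack at the call instruction; sketch_x86_frame_words is hypothetical):

static int sketch_x86_frame_words(int num_locals) {
  num_locals |= 3;             // force num_locals to 3 (mod 4)
  return 1 + 4 + num_locals;   // return address + ebp/ebx/esi/edi + locals
                               // = 0 (mod 4) words, i.e. a whole number of 16-byte blocks,
                               // so ESP ends up 16-byte aligned again
}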
@@ -488,8 +493,7 @@ void asm_x86_push_local(asm_x86_t *as, int local_num) {
asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, local_num));
}
void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32)
{
void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32) {
asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_ESP);
asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_esp(as, local_num), temp_r32);
asm_x86_push_r32(as, temp_r32);
@@ -497,11 +501,14 @@ void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32)
#endif
void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32) {
// TODO align stack on 16-byte boundary before the call
assert(n_args <= 5);
if (n_args > 4) {
asm_x86_push_r32(as, ASM_X86_REG_ARG_5);
assert(n_args <= 4);
// Align stack on 16-byte boundary during the call
unsigned int align = ((n_args + 3) & ~3) - n_args;
if (align) {
asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, align * WORD_SIZE);
}
if (n_args > 3) {
asm_x86_push_r32(as, ASM_X86_REG_ARG_4);
}
@@ -521,7 +528,7 @@ void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r
// the caller must clean up the stack
if (n_args > 0) {
asm_x86_add_i32_to_r32(as, WORD_SIZE * n_args, ASM_X86_REG_ESP);
asm_x86_add_i32_to_r32(as, (n_args + align) * WORD_SIZE, ASM_X86_REG_ESP);
}
}
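
For context, the rewritten asm_x86_call_ind pads the pushed arguments up to a multiple of four words so the callee sees a 16-byte-aligned stack, then pops the arguments and the padding together after the call. The padding formula works out as in this sketch:

static unsigned int sketch_call_padding(unsigned int n_args) {
  // n_args = 1 -> 3 padding words, n_args = 3 -> 1, n_args = 4 -> 0
  return ((n_args + 3) & ~3u) - n_args;
}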

View File

@@ -60,14 +60,16 @@
#define ASM_X86_REG_ARG_2 ASM_X86_REG_ECX
#define ASM_X86_REG_ARG_3 ASM_X86_REG_EDX
#define ASM_X86_REG_ARG_4 ASM_X86_REG_EBX
#define ASM_X86_REG_ARG_5 ASM_X86_REG_ESI
// condition codes, used for jcc and setcc (despite their j-name!)
#define ASM_X86_CC_JB (0x2) // below, unsigned
#define ASM_X86_CC_JAE (0x3) // above or equal, unsigned
#define ASM_X86_CC_JZ (0x4)
#define ASM_X86_CC_JE (0x4)
#define ASM_X86_CC_JNZ (0x5)
#define ASM_X86_CC_JNE (0x5)
#define ASM_X86_CC_JBE (0x6) // below or equal, unsigned
#define ASM_X86_CC_JA (0x7) // above, unsigned
#define ASM_X86_CC_JL (0xc) // less, signed
#define ASM_X86_CC_JGE (0xd) // greater or equal, signed
#define ASM_X86_CC_JLE (0xe) // less or equal, signed
@@ -82,7 +84,7 @@ static inline void asm_x86_end_pass(asm_x86_t *as) {
(void)as;
}
void asm_x86_mov_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
@@ -93,26 +95,27 @@ void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest
void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32);
void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32);
void asm_x86_add_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
void asm_x86_sub_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
void asm_x86_mul_r32_r32(asm_x86_t* as, int dest_r32, int src_r32);
void asm_x86_cmp_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b);
void asm_x86_test_r8_with_r8(asm_x86_t* as, int src_r32_a, int src_r32_b);
void asm_x86_test_r32_with_r32(asm_x86_t* as, int src_r32_a, int src_r32_b);
void asm_x86_setcc_r8(asm_x86_t* as, mp_uint_t jcc_type, int dest_r8);
void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32);
void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32);
void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32);
void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b);
void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8);
void asm_x86_jmp_reg(asm_x86_t *as, int src_r86);
void asm_x86_jmp_label(asm_x86_t* as, mp_uint_t label);
void asm_x86_jcc_label(asm_x86_t* as, mp_uint_t jcc_type, mp_uint_t label);
void asm_x86_entry(asm_x86_t* as, int num_locals);
void asm_x86_exit(asm_x86_t* as);
void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label);
void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label);
void asm_x86_entry(asm_x86_t *as, int num_locals);
void asm_x86_exit(asm_x86_t *as);
void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32);
void asm_x86_mov_local_to_r32(asm_x86_t* as, int src_local_num, int dest_r32);
void asm_x86_mov_r32_to_local(asm_x86_t* as, int src_r32, int dest_local_num);
void asm_x86_mov_local_addr_to_r32(asm_x86_t* as, int local_num, int dest_r32);
void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32);
void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num);
void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32);
void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r64, mp_uint_t label);
void asm_x86_call_ind(asm_x86_t* as, size_t fun_id, mp_uint_t n_args, int temp_r32);
void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32);
// Holds a pointer to mp_fun_table
#define ASM_X86_REG_FUN_TABLE ASM_X86_REG_EBP
@@ -129,7 +132,6 @@ void asm_x86_call_ind(asm_x86_t* as, size_t fun_id, mp_uint_t n_args, int temp_r
#define REG_ARG_2 ASM_X86_REG_ARG_2
#define REG_ARG_3 ASM_X86_REG_ARG_3
#define REG_ARG_4 ASM_X86_REG_ARG_4
#define REG_ARG_5 ASM_X86_REG_ARG_5
// caller-save, so can be used as temporaries
#define REG_TEMP0 ASM_X86_REG_EAX
@@ -187,6 +189,7 @@ void asm_x86_call_ind(asm_x86_t* as, size_t fun_id, mp_uint_t n_args, int temp_r
#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x86_mov_reg_pcrel((as), (reg_dest), (label))
#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
#define ASM_LSR_REG(as, reg) asm_x86_shr_r32_cl((as), (reg))
#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
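
The header now declares asm_x86_shr_r32_cl and maps the generic ASM_LSR_REG hook to it, while ASM_ASR_REG keeps using sar. A small standalone sketch (plain C, not emitter code) of the semantic difference the two hooks correspond to:

/* shr is a logical (zero-filling) shift, sar is an arithmetic
 * (sign-filling) shift; shown here with ordinary C shifts. */
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    uint32_t u = 0xF0000000u;
    int32_t s = (int32_t)-268435456; // same bit pattern as u on two's-complement targets
    printf("shr-style 0xF0000000 >> 4 = 0x%08" PRIX32 "\n", u >> 4);             // 0x0F000000
    printf("sar-style 0xF0000000 >> 4 = 0x%08" PRIX32 "\n", (uint32_t)(s >> 4)); // 0xFF000000 on typical compilers
    return 0;
}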

View File

@@ -65,7 +65,7 @@ void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) {
// jump over the constants
asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
as->const_table = (uint32_t*)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
// adjust the stack-pointer to store a0, a12, a13, a14, a15 and locals, 16-byte aligned
as->stack_adjust = (((ASM_XTENSA_NUM_REGS_SAVED + num_locals) * WORD_SIZE) + 15) & ~15;
@@ -105,7 +105,7 @@ void asm_xtensa_entry_win(asm_xtensa_t *as, int num_locals) {
// jump over the constants
asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
as->const_table = (uint32_t*)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
as->stack_adjust = 32 + ((((ASM_XTENSA_NUM_REGS_SAVED_WIN + num_locals) * WORD_SIZE) + 15) & ~15);
asm_xtensa_op_entry(as, ASM_XTENSA_REG_A1, as->stack_adjust);
@@ -173,7 +173,7 @@ void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, ui
size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
// load the constant
uint32_t const_table_offset = (uint8_t*)as->const_table - as->base.code_base;
uint32_t const_table_offset = (uint8_t *)as->const_table - as->base.code_base;
size_t loc = const_table_offset + as->cur_const * WORD_SIZE;
asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, loc);
// store the constant in the table

View File

@@ -243,6 +243,10 @@ static inline void asm_xtensa_op_sll(asm_xtensa_t *as, uint reg_dest, uint reg_s
asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, reg_dest, reg_src, 0));
}
static inline void asm_xtensa_op_srl(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 9, reg_dest, 0, reg_src));
}
static inline void asm_xtensa_op_sra(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, reg_dest, 0, reg_src));
}
@@ -372,6 +376,11 @@ void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
asm_xtensa_op_ssl((as), (reg_shift)); \
asm_xtensa_op_sll((as), (reg_dest), (reg_dest)); \
} while (0)
#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) \
do { \
asm_xtensa_op_ssr((as), (reg_shift)); \
asm_xtensa_op_srl((as), (reg_dest), (reg_dest)); \
} while (0)
#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) \
do { \
asm_xtensa_op_ssr((as), (reg_shift)); \

View File

@@ -75,21 +75,21 @@ const byte *mp_decode_uint_skip(const byte *ptr) {
#endif
STATIC NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
// generic message, used also for other argument issues
(void)f;
(void)expected;
(void)given;
mp_arg_error_terse_mismatch();
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
(void)f;
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function takes %d positional arguments but %d were given", expected, given));
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"%q() takes %d positional arguments but %d were given",
mp_obj_fun_get_name(MP_OBJ_FROM_PTR(f)), expected, given));
#endif
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function takes %d positional arguments but %d were given"), expected, given);
#elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("%q() takes %d positional arguments but %d were given"),
mp_obj_fun_get_name(MP_OBJ_FROM_PTR(f)), expected, given);
#endif
}
#if DEBUG_PRINT
@@ -195,7 +195,7 @@ void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw
}
// get pointer to arg_names array
const mp_obj_t *arg_names = (const mp_obj_t*)self->const_table;
const mp_obj_t *arg_names = (const mp_obj_t *)self->const_table;
for (size_t i = 0; i < n_kw; i++) {
// the keys in kwargs are expected to be qstr objects
@@ -203,8 +203,8 @@ void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw
for (size_t j = 0; j < n_pos_args + n_kwonly_args; j++) {
if (wanted_arg_name == arg_names[j]) {
if (code_state->state[n_state - 1 - j] != MP_OBJ_NULL) {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function got multiple values for argument '%q'", MP_OBJ_QSTR_VALUE(wanted_arg_name)));
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function got multiple values for argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
}
code_state->state[n_state - 1 - j] = kwargs[2 * i + 1];
goto continue2;
@@ -212,15 +212,15 @@ void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw
}
// Didn't find name match with positional args
if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) == 0) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("unexpected keyword argument");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"unexpected keyword argument '%q'", MP_OBJ_QSTR_VALUE(wanted_arg_name)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("unexpected keyword argument"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("unexpected keyword argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
#endif
}
mp_obj_dict_store(dict, kwargs[2 * i], kwargs[2 * i + 1]);
continue2:;
continue2:;
}
DEBUG_printf("Args with kws flattened: ");
@@ -241,8 +241,8 @@ continue2:;
// Check that all mandatory positional args are specified
while (d < &code_state->state[n_state]) {
if (*d++ == MP_OBJ_NULL) {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function missing required positional argument #%d", &code_state->state[n_state] - d));
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function missing required positional argument #%d"), &code_state->state[n_state] - d);
}
}
@@ -252,13 +252,13 @@ continue2:;
if (code_state->state[n_state - 1 - n_pos_args - i] == MP_OBJ_NULL) {
mp_map_elem_t *elem = NULL;
if ((scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
elem = mp_map_lookup(&((mp_obj_dict_t*)MP_OBJ_TO_PTR(self->extra_args[n_def_pos_args]))->map, arg_names[n_pos_args + i], MP_MAP_LOOKUP);
elem = mp_map_lookup(&((mp_obj_dict_t *)MP_OBJ_TO_PTR(self->extra_args[n_def_pos_args]))->map, arg_names[n_pos_args + i], MP_MAP_LOOKUP);
}
if (elem != NULL) {
code_state->state[n_state - 1 - n_pos_args - i] = elem->value;
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"function missing required keyword argument '%q'", MP_OBJ_QSTR_VALUE(arg_names[n_pos_args + i])));
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("function missing required keyword argument '%q'"), MP_OBJ_QSTR_VALUE(arg_names[n_pos_args + i]));
}
}
}
@@ -266,7 +266,7 @@ continue2:;
} else {
// no keyword arguments given
if (n_kwonly_args != 0) {
mp_raise_TypeError("function missing keyword-only argument");
mp_raise_TypeError(MP_ERROR_TEXT("function missing keyword-only argument"));
}
if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
*var_pos_kw_args = mp_obj_new_dict(0);
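
The argument-checking errors above switch from hand-built exceptions raised with nlr_raise() to mp_raise_msg_varg() with messages wrapped in MP_ERROR_TEXT(), and the terse-reporting check becomes <= so lower reporting levels also take the short path. A minimal sketch of the new pattern, assuming the usual py/runtime.h environment; the helper itself is hypothetical and only mirrors the calls visible in the hunk:

/* Hypothetical helper showing the updated error-raising convention. */
#include "py/runtime.h"

static NORETURN void example_arg_count_error(size_t expected, size_t given) {
    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
    // generic message, shared with other argument issues
    mp_arg_error_terse_mismatch();
    #else
    mp_raise_msg_varg(&mp_type_TypeError,
        MP_ERROR_TEXT("function takes %d positional arguments but %d were given"),
        expected, given);
    #endif
}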

View File

@@ -72,98 +72,101 @@
// constN : obj
#define MP_BC_PRELUDE_SIG_ENCODE(S, E, scope, out_byte, out_env) \
do { \
/*// Get values to store in prelude */ \
size_t F = scope->scope_flags & MP_SCOPE_FLAG_ALL_SIG; \
size_t A = scope->num_pos_args; \
size_t K = scope->num_kwonly_args; \
size_t D = scope->num_def_pos_args; \
do { \
/*// Get values to store in prelude */ \
size_t F = scope->scope_flags & MP_SCOPE_FLAG_ALL_SIG; \
size_t A = scope->num_pos_args; \
size_t K = scope->num_kwonly_args; \
size_t D = scope->num_def_pos_args; \
\
/* Adjust S to shrink range, to compress better */ \
S -= 1; \
/* Adjust S to shrink range, to compress better */ \
S -= 1; \
\
/* Encode prelude */ \
/* xSSSSEAA */ \
uint8_t z = (S & 0xf) << 3 | (E & 1) << 2 | (A & 3); \
S >>= 4; \
E >>= 1; \
A >>= 2; \
while (S | E | F | A | K | D) { \
out_byte(out_env, 0x80 | z); \
/* xFSSKAED */ \
z = (F & 1) << 6 | (S & 3) << 4 | (K & 1) << 3 \
| (A & 1) << 2 | (E & 1) << 1 | (D & 1); \
S >>= 2; \
E >>= 1; \
F >>= 1; \
A >>= 1; \
K >>= 1; \
D >>= 1; \
} \
out_byte(out_env, z); \
} while (0)
/* Encode prelude */ \
/* xSSSSEAA */ \
uint8_t z = (S & 0xf) << 3 | (E & 1) << 2 | (A & 3); \
S >>= 4; \
E >>= 1; \
A >>= 2; \
while (S | E | F | A | K | D) { \
out_byte(out_env, 0x80 | z); \
/* xFSSKAED */ \
z = (F & 1) << 6 | (S & 3) << 4 | (K & 1) << 3 \
| (A & 1) << 2 | (E & 1) << 1 | (D & 1); \
S >>= 2; \
E >>= 1; \
F >>= 1; \
A >>= 1; \
K >>= 1; \
D >>= 1; \
} \
out_byte(out_env, z); \
} while (0)
#define MP_BC_PRELUDE_SIG_DECODE_INTO(ip, S, E, F, A, K, D) \
do { \
uint8_t z = *(ip)++; \
/* xSSSSEAA */ \
S = (z >> 3) & 0xf; \
E = (z >> 2) & 0x1; \
F = 0; \
A = z & 0x3; \
K = 0; \
D = 0; \
for (unsigned n = 0; z & 0x80; ++n) { \
z = *(ip)++; \
/* xFSSKAED */ \
S |= (z & 0x30) << (2 * n); \
E |= (z & 0x02) << n; \
F |= ((z & 0x40) >> 6) << n; \
A |= (z & 0x4) << n; \
K |= ((z & 0x08) >> 3) << n; \
D |= (z & 0x1) << n; \
} \
S += 1; \
} while (0)
do { \
uint8_t z = *(ip)++; \
/* xSSSSEAA */ \
S = (z >> 3) & 0xf; \
E = (z >> 2) & 0x1; \
F = 0; \
A = z & 0x3; \
K = 0; \
D = 0; \
for (unsigned n = 0; z & 0x80; ++n) { \
z = *(ip)++; \
/* xFSSKAED */ \
S |= (z & 0x30) << (2 * n); \
E |= (z & 0x02) << n; \
F |= ((z & 0x40) >> 6) << n; \
A |= (z & 0x4) << n; \
K |= ((z & 0x08) >> 3) << n; \
D |= (z & 0x1) << n; \
} \
S += 1; \
} while (0)
#define MP_BC_PRELUDE_SIG_DECODE(ip) \
size_t n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args; \
MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args)
MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args); \
(void)n_state; (void)n_exc_stack; (void)scope_flags; \
(void)n_pos_args; (void)n_kwonly_args; (void)n_def_pos_args
#define MP_BC_PRELUDE_SIZE_ENCODE(I, C, out_byte, out_env) \
do { \
/* Encode bit-wise as: xIIIIIIC */ \
uint8_t z = 0; \
do { \
z = (I & 0x3f) << 1 | (C & 1); \
C >>= 1; \
I >>= 6; \
if (C | I) { \
z |= 0x80; \
} \
out_byte(out_env, z); \
} while (C | I); \
} while (0)
do { \
/* Encode bit-wise as: xIIIIIIC */ \
uint8_t z = 0; \
do { \
z = (I & 0x3f) << 1 | (C & 1); \
C >>= 1; \
I >>= 6; \
if (C | I) { \
z |= 0x80; \
} \
out_byte(out_env, z); \
} while (C | I); \
} while (0)
#define MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, I, C) \
do { \
uint8_t z; \
C = 0; \
I = 0; \
for (unsigned n = 0;; ++n) { \
z = *(ip)++; \
/* xIIIIIIC */ \
C |= (z & 1) << n; \
I |= ((z & 0x7e) >> 1) << (6 * n); \
if (!(z & 0x80)) { \
break; \
} \
} \
} while (0)
do { \
uint8_t z; \
C = 0; \
I = 0; \
for (unsigned n = 0;; ++n) { \
z = *(ip)++; \
/* xIIIIIIC */ \
C |= (z & 1) << n; \
I |= ((z & 0x7e) >> 1) << (6 * n); \
if (!(z & 0x80)) { \
break; \
} \
} \
} while (0)
#define MP_BC_PRELUDE_SIZE_DECODE(ip) \
size_t n_info, n_cell; \
MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, n_info, n_cell)
MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, n_info, n_cell); \
(void)n_info; (void)n_cell
// Sentinel value for mp_code_state_t.exc_sp_idx
#define MP_CODE_STATE_EXC_SP_IDX_SENTINEL ((uint16_t)-1)
@@ -216,7 +219,7 @@ typedef struct _mp_code_state_t {
// Variable-length
mp_obj_t state[0];
// Variable-length, never accessed by name, only as (void*)(state + n_state)
//mp_exc_stack_t exc_state[0];
// mp_exc_stack_t exc_state[0];
} mp_code_state_t;
mp_uint_t mp_decode_uint(const byte **ptr);
@@ -226,16 +229,16 @@ const byte *mp_decode_uint_skip(const byte *ptr);
mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp_obj_t inject_exc);
mp_code_state_t *mp_obj_fun_bc_prepare_codestate(mp_obj_t func, size_t n_args, size_t n_kw, const mp_obj_t *args);
void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args);
void mp_bytecode_print(const void *descr, const byte *code, mp_uint_t len, const mp_uint_t *const_table);
void mp_bytecode_print2(const byte *code, size_t len, const mp_uint_t *const_table);
const byte *mp_bytecode_print_str(const byte *ip);
#define mp_bytecode_print_inst(code, const_table) mp_bytecode_print2(code, 1, const_table)
void mp_bytecode_print(const mp_print_t *print, const void *descr, const byte *code, mp_uint_t len, const mp_uint_t *const_table);
void mp_bytecode_print2(const mp_print_t *print, const byte *code, size_t len, const mp_uint_t *const_table);
const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip);
#define mp_bytecode_print_inst(print, code, const_table) mp_bytecode_print2(print, code, 1, const_table)
// Helper macros to access pointer with least significant bits holding flags
#define MP_TAGPTR_PTR(x) ((void*)((uintptr_t)(x) & ~((uintptr_t)3)))
#define MP_TAGPTR_PTR(x) ((void *)((uintptr_t)(x) & ~((uintptr_t)3)))
#define MP_TAGPTR_TAG0(x) ((uintptr_t)(x) & 1)
#define MP_TAGPTR_TAG1(x) ((uintptr_t)(x) & 2)
#define MP_TAGPTR_MAKE(ptr, tag) ((void*)((uintptr_t)(ptr) | (tag)))
#define MP_TAGPTR_MAKE(ptr, tag) ((void *)((uintptr_t)(ptr) | (tag)))
#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
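
To make the xSSSSEAA prelude signature byte above concrete, here is a standalone decode of one encoded byte (an illustration, not MicroPython code). The byte 0x22 encodes n_state=5, n_exc_stack=0 and n_pos_args=2; the continuation bytes carrying the F/K/D fields are only emitted for larger values.

/* Decode a single prelude signature byte the way
 * MP_BC_PRELUDE_SIG_DECODE_INTO does for the no-continuation case. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint8_t prelude[] = { 0x22 };
    const uint8_t *ip = prelude;
    uint8_t z = *ip++;
    unsigned n_state = ((z >> 3) & 0xf) + 1; // S field, stored as S-1
    unsigned n_exc_stack = (z >> 2) & 0x1;   // E field
    unsigned n_pos_args = z & 0x3;           // A field
    // z & 0x80 is clear, so there are no continuation bytes in this example
    printf("n_state=%u n_exc_stack=%u n_pos_args=%u\n", n_state, n_exc_stack, n_pos_args);
    return 0;
}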

View File

@@ -48,15 +48,15 @@
#define MP_BC_BASE_BYTE_O (0x50) // LLLLSSDTTTTTEEFF
#define MP_BC_BASE_BYTE_E (0x60) // --BREEEYYI------
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI (0x70) // LLLLLLLLLLLLLLLL
// (0x80) // LLLLLLLLLLLLLLLL
// (0x90) // LLLLLLLLLLLLLLLL
// (0xa0) // LLLLLLLLLLLLLLLL
// (0x80) // LLLLLLLLLLLLLLLL
// (0x90) // LLLLLLLLLLLLLLLL
// (0xa0) // LLLLLLLLLLLLLLLL
#define MP_BC_LOAD_FAST_MULTI (0xb0) // LLLLLLLLLLLLLLLL
#define MP_BC_STORE_FAST_MULTI (0xc0) // SSSSSSSSSSSSSSSS
#define MP_BC_UNARY_OP_MULTI (0xd0) // OOOOOOO
#define MP_BC_BINARY_OP_MULTI (0xd7) // OOOOOOOOO
// (0xe0) // OOOOOOOOOOOOOOOO
// (0xf0) // OOOOOOOOOO------
// (0xe0) // OOOOOOOOOOOOOOOO
// (0xf0) // OOOOOOOOOO------
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM (64)
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS (16)

View File

@@ -46,24 +46,40 @@ size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign) {
size_t size = 0;
int align = 1;
switch (struct_type) {
case '<': case '>':
case '<':
case '>':
switch (val_type) {
case 'b': case 'B':
size = 1; break;
case 'h': case 'H':
size = 2; break;
case 'i': case 'I':
size = 4; break;
case 'l': case 'L':
size = 4; break;
case 'q': case 'Q':
size = 8; break;
case 'P': case 'O': case 'S':
size = sizeof(void*); break;
case 'b':
case 'B':
size = 1;
break;
case 'h':
case 'H':
size = 2;
break;
case 'i':
case 'I':
size = 4;
break;
case 'l':
case 'L':
size = 4;
break;
case 'q':
case 'Q':
size = 8;
break;
case 'P':
case 'O':
case 'S':
size = sizeof(void *);
break;
case 'f':
size = sizeof(float); break;
size = sizeof(float);
break;
case 'd':
size = sizeof(double); break;
size = sizeof(double);
break;
}
break;
case '@': {
@@ -76,35 +92,50 @@ size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign) {
// particular (or any) ABI.
switch (val_type) {
case BYTEARRAY_TYPECODE:
case 'b': case 'B':
align = size = 1; break;
case 'h': case 'H':
case 'b':
case 'B':
align = size = 1;
break;
case 'h':
case 'H':
align = alignof(short);
size = sizeof(short); break;
case 'i': case 'I':
size = sizeof(short);
break;
case 'i':
case 'I':
align = alignof(int);
size = sizeof(int); break;
case 'l': case 'L':
size = sizeof(int);
break;
case 'l':
case 'L':
align = alignof(long);
size = sizeof(long); break;
case 'q': case 'Q':
size = sizeof(long);
break;
case 'q':
case 'Q':
align = alignof(long long);
size = sizeof(long long); break;
case 'P': case 'O': case 'S':
align = alignof(void*);
size = sizeof(void*); break;
size = sizeof(long long);
break;
case 'P':
case 'O':
case 'S':
align = alignof(void *);
size = sizeof(void *);
break;
case 'f':
align = alignof(float);
size = sizeof(float); break;
size = sizeof(float);
break;
case 'd':
align = alignof(double);
size = sizeof(double); break;
size = sizeof(double);
break;
}
}
}
if (size == 0) {
mp_raise_ValueError("bad typecode");
mp_raise_ValueError(MP_ERROR_TEXT("bad typecode"));
}
if (palign != NULL) {
@@ -117,44 +148,44 @@ mp_obj_t mp_binary_get_val_array(char typecode, void *p, size_t index) {
mp_int_t val = 0;
switch (typecode) {
case 'b':
val = ((signed char*)p)[index];
val = ((signed char *)p)[index];
break;
case BYTEARRAY_TYPECODE:
case 'B':
val = ((unsigned char*)p)[index];
val = ((unsigned char *)p)[index];
break;
case 'h':
val = ((short*)p)[index];
val = ((short *)p)[index];
break;
case 'H':
val = ((unsigned short*)p)[index];
val = ((unsigned short *)p)[index];
break;
case 'i':
return mp_obj_new_int(((int*)p)[index]);
return mp_obj_new_int(((int *)p)[index]);
case 'I':
return mp_obj_new_int_from_uint(((unsigned int*)p)[index]);
return mp_obj_new_int_from_uint(((unsigned int *)p)[index]);
case 'l':
return mp_obj_new_int(((long*)p)[index]);
return mp_obj_new_int(((long *)p)[index]);
case 'L':
return mp_obj_new_int_from_uint(((unsigned long*)p)[index]);
return mp_obj_new_int_from_uint(((unsigned long *)p)[index]);
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
case 'q':
return mp_obj_new_int_from_ll(((long long*)p)[index]);
return mp_obj_new_int_from_ll(((long long *)p)[index]);
case 'Q':
return mp_obj_new_int_from_ull(((unsigned long long*)p)[index]);
return mp_obj_new_int_from_ull(((unsigned long long *)p)[index]);
#endif
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
case 'f':
return mp_obj_new_float(((float*)p)[index]);
return mp_obj_new_float_from_f(((float *)p)[index]);
case 'd':
return mp_obj_new_float(((double*)p)[index]);
#endif
return mp_obj_new_float_from_d(((double *)p)[index]);
#endif
// Extension to CPython: array of objects
case 'O':
return ((mp_obj_t*)p)[index];
return ((mp_obj_t *)p)[index];
// Extension to CPython: array of pointers
case 'P':
return mp_obj_new_int((mp_int_t)(uintptr_t)((void**)p)[index]);
return mp_obj_new_int((mp_int_t)(uintptr_t)((void **)p)[index]);
}
return MP_OBJ_NEW_SMALL_INT(val);
}
@@ -171,7 +202,7 @@ long long mp_binary_get_int(size_t size, bool is_signed, bool big_endian, const
delta = 1;
}
long long val = 0;
unsigned long long val = 0;
if (is_signed && *src & 0x80) {
val = -1;
}
@@ -206,16 +237,20 @@ mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte *p_base, byte *
if (val_type == 'O') {
return (mp_obj_t)(mp_uint_t)val;
} else if (val_type == 'S') {
const char *s_val = (const char*)(uintptr_t)(mp_uint_t)val;
const char *s_val = (const char *)(uintptr_t)(mp_uint_t)val;
return mp_obj_new_str(s_val, strlen(s_val));
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
} else if (val_type == 'f') {
union { uint32_t i; float f; } fpu = {val};
return mp_obj_new_float(fpu.f);
union { uint32_t i;
float f;
} fpu = {val};
return mp_obj_new_float_from_f(fpu.f);
} else if (val_type == 'd') {
union { uint64_t i; double f; } fpu = {val};
return mp_obj_new_float(fpu.f);
#endif
union { uint64_t i;
double f;
} fpu = {val};
return mp_obj_new_float_from_d(fpu.f);
#endif
} else if (is_signed(val_type)) {
if ((long long)MP_SMALL_INT_MIN <= val && val <= (long long)MP_SMALL_INT_MAX) {
return mp_obj_new_int((mp_int_t)val);
@@ -236,13 +271,13 @@ void mp_binary_set_int(size_t val_sz, bool big_endian, byte *dest, mp_uint_t val
memcpy(dest, &val, val_sz);
} else if (MP_ENDIANNESS_BIG && big_endian) {
// only copy the least-significant val_sz bytes
memcpy(dest, (byte*)&val + sizeof(mp_uint_t) - val_sz, val_sz);
memcpy(dest, (byte *)&val + sizeof(mp_uint_t) - val_sz, val_sz);
} else {
const byte *src;
if (MP_ENDIANNESS_LITTLE) {
src = (const byte*)&val + val_sz;
src = (const byte *)&val + val_sz;
} else {
src = (const byte*)&val + sizeof(mp_uint_t);
src = (const byte *)&val + sizeof(mp_uint_t);
}
while (val_sz--) {
*dest++ = *--src;
@@ -271,17 +306,22 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p
case 'O':
val = (mp_uint_t)val_in;
break;
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
case 'f': {
union { uint32_t i; float f; } fp_sp;
fp_sp.f = mp_obj_get_float(val_in);
union { uint32_t i;
float f;
} fp_sp;
fp_sp.f = mp_obj_get_float_to_f(val_in);
val = fp_sp.i;
break;
}
case 'd': {
union { uint64_t i64; uint32_t i32[2]; double f; } fp_dp;
fp_dp.f = mp_obj_get_float(val_in);
if (BYTES_PER_WORD == 8) {
union { uint64_t i64;
uint32_t i32[2];
double f;
} fp_dp;
fp_dp.f = mp_obj_get_float_to_d(val_in);
if (MP_BYTES_PER_OBJ_WORD == 8) {
val = fp_dp.i64;
} else {
int be = struct_type == '>';
@@ -291,25 +331,25 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p
}
break;
}
#endif
#endif
default:
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
if (mp_obj_is_type(val_in, &mp_type_int)) {
mp_obj_int_to_bytes_impl(val_in, struct_type == '>', size, p);
return;
} else
}
#endif
{
val = mp_obj_get_int(val_in);
// zero/sign extend if needed
if (BYTES_PER_WORD < 8 && size > sizeof(val)) {
int c = (is_signed(val_type) && (mp_int_t)val < 0) ? 0xff : 0x00;
memset(p, c, size);
if (struct_type == '>') {
p += size - sizeof(val);
}
val = mp_obj_get_int(val_in);
// zero/sign extend if needed
if (MP_BYTES_PER_OBJ_WORD < 8 && size > sizeof(val)) {
int c = (mp_int_t)val < 0 ? 0xff : 0x00;
memset(p, c, size);
if (struct_type == '>') {
p += size - sizeof(val);
}
}
break;
}
mp_binary_set_int(MIN((size_t)size, sizeof(val)), struct_type == '>', p, val);
@@ -317,24 +357,24 @@ void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p
void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_in) {
switch (typecode) {
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
case 'f':
((float*)p)[index] = mp_obj_get_float(val_in);
((float *)p)[index] = mp_obj_get_float_to_f(val_in);
break;
case 'd':
((double*)p)[index] = mp_obj_get_float(val_in);
((double *)p)[index] = mp_obj_get_float_to_d(val_in);
break;
#endif
#endif
// Extension to CPython: array of objects
case 'O':
((mp_obj_t*)p)[index] = val_in;
((mp_obj_t *)p)[index] = val_in;
break;
default:
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
if (mp_obj_is_type(val_in, &mp_type_int)) {
size_t size = mp_binary_get_size('@', typecode, NULL);
mp_obj_int_to_bytes_impl(val_in, MP_ENDIANNESS_BIG,
size, (uint8_t*)p + index * size);
size, (uint8_t *)p + index * size);
return;
}
#endif
@@ -345,49 +385,49 @@ void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_
void mp_binary_set_val_array_from_int(char typecode, void *p, size_t index, mp_int_t val) {
switch (typecode) {
case 'b':
((signed char*)p)[index] = val;
((signed char *)p)[index] = val;
break;
case BYTEARRAY_TYPECODE:
case 'B':
((unsigned char*)p)[index] = val;
((unsigned char *)p)[index] = val;
break;
case 'h':
((short*)p)[index] = val;
((short *)p)[index] = val;
break;
case 'H':
((unsigned short*)p)[index] = val;
((unsigned short *)p)[index] = val;
break;
case 'i':
((int*)p)[index] = val;
((int *)p)[index] = val;
break;
case 'I':
((unsigned int*)p)[index] = val;
((unsigned int *)p)[index] = val;
break;
case 'l':
((long*)p)[index] = val;
((long *)p)[index] = val;
break;
case 'L':
((unsigned long*)p)[index] = val;
((unsigned long *)p)[index] = val;
break;
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
case 'q':
((long long*)p)[index] = val;
((long long *)p)[index] = val;
break;
case 'Q':
((unsigned long long*)p)[index] = val;
((unsigned long long *)p)[index] = val;
break;
#endif
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
case 'f':
((float*)p)[index] = val;
((float *)p)[index] = (float)val;
break;
case 'd':
((double*)p)[index] = val;
((double *)p)[index] = (double)val;
break;
#endif
#endif
// Extension to CPython: array of pointers
case 'P':
((void**)p)[index] = (void*)(uintptr_t)val;
((void **)p)[index] = (void *)(uintptr_t)val;
break;
}
}
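
The 'f' and 'd' cases above move float values through a union before mp_binary_set_int() writes the bytes out. A standalone sketch of that type-punning step (not MicroPython code, bit pattern valid on IEEE-754 platforms):

/* Round-trip a float through its 32-bit container, as the 'f' case does. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    union { uint32_t i; float f; } fp_sp;
    fp_sp.f = 1.5f;
    printf("float 1.5 as raw bits: 0x%08X\n", (unsigned)fp_sp.i); // 0x3FC00000
    union { uint32_t i; float f; } back = { .i = fp_sp.i };
    printf("round-tripped value: %g\n", (double)back.f);
    return 0;
}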

View File

@@ -103,6 +103,7 @@ extern const mp_obj_module_t mp_module_thread;
extern const mp_obj_dict_t mp_module_builtins_globals;
// extmod modules
extern const mp_obj_module_t mp_module_uasyncio;
extern const mp_obj_module_t mp_module_uerrno;
extern const mp_obj_module_t mp_module_uctypes;
extern const mp_obj_module_t mp_module_uzlib;

View File

@@ -90,11 +90,17 @@ STATIC mp_obj_t mp_builtin_compile(size_t n_args, const mp_obj_t *args) {
qstr mode = mp_obj_str_get_qstr(args[2]);
mp_parse_input_kind_t parse_input_kind;
switch (mode) {
case MP_QSTR_single: parse_input_kind = MP_PARSE_SINGLE_INPUT; break;
case MP_QSTR_exec: parse_input_kind = MP_PARSE_FILE_INPUT; break;
case MP_QSTR_eval: parse_input_kind = MP_PARSE_EVAL_INPUT; break;
case MP_QSTR_single:
parse_input_kind = MP_PARSE_SINGLE_INPUT;
break;
case MP_QSTR_exec:
parse_input_kind = MP_PARSE_FILE_INPUT;
break;
case MP_QSTR_eval:
parse_input_kind = MP_PARSE_EVAL_INPUT;
break;
default:
mp_raise_ValueError("bad compile mode");
mp_raise_ValueError(MP_ERROR_TEXT("bad compile mode"));
}
mp_obj_code_t *code = m_new_obj(mp_obj_code_t);
@@ -130,17 +136,18 @@ STATIC mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_i
}
#endif
size_t str_len;
const char *str = mp_obj_str_get_data(args[0], &str_len);
// Extract the source code.
mp_buffer_info_t bufinfo;
mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
// create the lexer
// MP_PARSE_SINGLE_INPUT is used to indicate a file input
mp_lexer_t *lex;
if (MICROPY_PY_BUILTINS_EXECFILE && parse_input_kind == MP_PARSE_SINGLE_INPUT) {
lex = mp_lexer_new_from_file(str);
lex = mp_lexer_new_from_file(bufinfo.buf);
parse_input_kind = MP_PARSE_FILE_INPUT;
} else {
lex = mp_lexer_new_from_str_len(MP_QSTR__lt_string_gt_, str, str_len, 0);
lex = mp_lexer_new_from_str_len(MP_QSTR__lt_string_gt_, bufinfo.buf, bufinfo.len, 0);
}
return mp_parse_compile_execute(lex, parse_input_kind, globals, locals);

View File

@@ -33,18 +33,18 @@
#if MICROPY_PY_BUILTINS_HELP
const char mp_help_default_text[] =
"Welcome to MicroPython!\n"
"\n"
"For online docs please visit http://docs.micropython.org/\n"
"\n"
"Control commands:\n"
" CTRL-A -- on a blank line, enter raw REPL mode\n"
" CTRL-B -- on a blank line, enter normal REPL mode\n"
" CTRL-C -- interrupt a running program\n"
" CTRL-D -- on a blank line, exit or do a soft reset\n"
" CTRL-E -- on a blank line, enter paste mode\n"
"\n"
"For further help on a specific object, type help(obj)\n"
"Welcome to MicroPython!\n"
"\n"
"For online docs please visit http://docs.micropython.org/\n"
"\n"
"Control commands:\n"
" CTRL-A -- on a blank line, enter raw REPL mode\n"
" CTRL-B -- on a blank line, enter normal REPL mode\n"
" CTRL-C -- interrupt a running program\n"
" CTRL-D -- on a blank line, exit or do a soft reset\n"
" CTRL-E -- on a blank line, enter paste mode\n"
"\n"
"For further help on a specific object, type help(obj)\n"
;
STATIC void mp_help_print_info_about_object(mp_obj_t name_o, mp_obj_t value) {
@@ -91,7 +91,7 @@ STATIC void mp_help_print_modules(void) {
#endif
// sort the list so it's printed in alphabetical order
mp_obj_list_sort(1, &list, (mp_map_t*)&mp_const_empty_map);
mp_obj_list_sort(1, &list, (mp_map_t *)&mp_const_empty_map);
// print the list of modules in a column-first order
#define NUM_COLUMNS (4)
@@ -134,7 +134,7 @@ STATIC void mp_help_print_obj(const mp_obj_t obj) {
}
#endif
mp_obj_type_t *type = mp_obj_get_type(obj);
const mp_obj_type_t *type = mp_obj_get_type(obj);
// try to print something sensible about the given object
mp_print_str(MP_PYTHON_PRINTER, "object ");

View File

@@ -96,19 +96,13 @@ STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) {
}
STATIC mp_import_stat_t find_file(const char *file_str, uint file_len, vstr_t *dest) {
#if MICROPY_PY_SYS
#if MICROPY_PY_SYS
// extract the list of paths
size_t path_num;
mp_obj_t *path_items;
mp_obj_list_get(mp_sys_path, &path_num, &path_items);
if (path_num == 0) {
#endif
// mp_sys_path is empty, so just use the given file name
vstr_add_strn(dest, file_str, file_len);
return stat_dir_or_file(dest);
#if MICROPY_PY_SYS
} else {
if (path_num != 0) {
// go through each path looking for a directory or file
for (size_t i = 0; i < path_num; i++) {
vstr_reset(dest);
@@ -128,7 +122,11 @@ STATIC mp_import_stat_t find_file(const char *file_str, uint file_len, vstr_t *d
// could not find a directory or file
return MP_IMPORT_STAT_NO_EXIST;
}
#endif
#endif
// mp_sys_path is empty, so just use the given file name
vstr_add_strn(dest, file_str, file_len);
return stat_dir_or_file(dest);
}
#if MICROPY_MODULE_FROZEN_STR || MICROPY_ENABLE_COMPILER
@@ -144,8 +142,8 @@ STATIC void do_load_from_lexer(mp_obj_t module_obj, mp_lexer_t *lex) {
}
#endif
#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_MODULE_FROZEN_MPY
STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code, const char* source_name) {
#if (MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD) || MICROPY_MODULE_FROZEN_MPY
STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code, const char *source_name) {
(void)source_name;
#if MICROPY_PY___FILE__
@@ -230,7 +228,7 @@ STATIC void do_load(mp_obj_t module_obj, vstr_t *file) {
}
#else
// If we get here then the file was not frozen and we can't compile scripts.
mp_raise_msg(&mp_type_ImportError, "script compilation not supported");
mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("script compilation not supported"));
#endif
}
@@ -246,14 +244,14 @@ STATIC void chop_component(const char *start, const char **end) {
}
mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
#if DEBUG_PRINT
#if DEBUG_PRINT
DEBUG_printf("__import__:\n");
for (size_t i = 0; i < n_args; i++) {
DEBUG_printf(" ");
mp_obj_print(args[i], PRINT_REPR);
DEBUG_printf("\n");
}
#endif
#endif
mp_obj_t module_name = args[0];
mp_obj_t fromtuple = mp_const_none;
@@ -292,12 +290,12 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
mp_map_elem_t *elem = mp_map_lookup(globals_map, MP_OBJ_NEW_QSTR(MP_QSTR___path__), MP_MAP_LOOKUP);
bool is_pkg = (elem != NULL);
#if DEBUG_PRINT
#if DEBUG_PRINT
DEBUG_printf("Current module/package: ");
mp_obj_print(this_name_q, PRINT_REPR);
DEBUG_printf(", is_package: %d", is_pkg);
DEBUG_printf("\n");
#endif
#endif
size_t this_name_l;
const char *this_name = mp_obj_str_get_data(this_name_q, &this_name_l);
@@ -315,7 +313,7 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
// We must have some component left over to import from
if (p == this_name) {
mp_raise_ValueError("cannot perform relative import");
mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("can't perform relative import"));
}
uint new_mod_l = (mod_len == 0 ? (size_t)(p - this_name) : (size_t)(p - this_name) + 1 + mod_len);
@@ -398,12 +396,11 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
#endif
if (module_obj == MP_OBJ_NULL) {
// couldn't find the file, so fail
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_msg(&mp_type_ImportError, "module not found");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError,
"no module named '%q'", mod_name));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
#else
mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), mod_name);
#endif
}
} else {
// found the file, so get the module
@@ -443,7 +440,7 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
vstr_add_char(&path, PATH_SEP_CHAR);
vstr_add_str(&path, "__init__.py");
if (stat_file_py_or_mpy(&path) != MP_IMPORT_STAT_FILE) {
//mp_warning("%s is imported as namespace package", vstr_str(&path));
// mp_warning("%s is imported as namespace package", vstr_str(&path));
} else {
do_load(module_obj, &path);
}
@@ -481,7 +478,7 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
// Check that it's not a relative import
if (n_args >= 5 && MP_OBJ_SMALL_INT_VALUE(args[4]) != 0) {
mp_raise_NotImplementedError("relative import");
mp_raise_NotImplementedError(MP_ERROR_TEXT("relative import"));
}
// Check if module already exists, and return it if it does
@@ -502,12 +499,11 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
#endif
// Couldn't find the module, so fail
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_msg(&mp_type_ImportError, "module not found");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ImportError,
"no module named '%q'", module_name_qstr));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
#else
mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), module_name_qstr);
#endif
}
#endif // MICROPY_ENABLE_EXTERNAL_IMPORT

File diff suppressed because it is too large


python/src/py/dynruntime.h Normal file
View File

@@ -0,0 +1,276 @@
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2019 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MICROPY_INCLUDED_PY_DYNRUNTIME_H
#define MICROPY_INCLUDED_PY_DYNRUNTIME_H
// This header file contains definitions to dynamically implement the static
// MicroPython runtime API defined in py/obj.h and py/runtime.h.
#include "py/nativeglue.h"
#include "py/objstr.h"
#include "py/objtype.h"
#if !MICROPY_ENABLE_DYNRUNTIME
#error "dynruntime.h included in non-dynamic-module build."
#endif
#undef MP_ROM_QSTR
#undef MP_OBJ_QSTR_VALUE
#undef MP_OBJ_NEW_QSTR
#undef mp_const_none
#undef mp_const_false
#undef mp_const_true
#undef mp_const_empty_tuple
#undef nlr_raise
/******************************************************************************/
// Memory allocation
#define m_malloc(n) (m_malloc_dyn((n)))
#define m_free(ptr) (m_free_dyn((ptr)))
#define m_realloc(ptr, new_num_bytes) (m_realloc_dyn((ptr), (new_num_bytes)))
static inline void *m_malloc_dyn(size_t n) {
// TODO won't raise on OOM
return mp_fun_table.realloc_(NULL, n, false);
}
static inline void m_free_dyn(void *ptr) {
mp_fun_table.realloc_(ptr, 0, false);
}
static inline void *m_realloc_dyn(void *ptr, size_t new_num_bytes) {
// TODO won't raise on OOM
return mp_fun_table.realloc_(ptr, new_num_bytes, true);
}
/******************************************************************************/
// Printing
#define mp_plat_print (*mp_fun_table.plat_print)
#define mp_printf(p, ...) (mp_fun_table.printf_((p), __VA_ARGS__))
#define mp_vprintf(p, fmt, args) (mp_fun_table.vprintf_((p), (fmt), (args)))
/******************************************************************************/
// Types and objects
#define MP_OBJ_NEW_QSTR(x) MP_OBJ_NEW_QSTR_##x
#define mp_type_type (*mp_fun_table.type_type)
#define mp_type_str (*mp_fun_table.type_str)
#define mp_type_tuple (*((mp_obj_base_t *)mp_const_empty_tuple)->type)
#define mp_type_list (*mp_fun_table.type_list)
#define mp_type_EOFError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_EOFError)))
#define mp_type_IndexError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_IndexError)))
#define mp_type_KeyError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_KeyError)))
#define mp_type_NotImplementedError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_NotImplementedError)))
#define mp_type_RuntimeError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_RuntimeError)))
#define mp_type_TypeError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_TypeError)))
#define mp_type_ValueError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_ValueError)))
#define mp_stream_read_obj (*mp_fun_table.stream_read_obj)
#define mp_stream_readinto_obj (*mp_fun_table.stream_readinto_obj)
#define mp_stream_unbuffered_readline_obj (*mp_fun_table.stream_unbuffered_readline_obj)
#define mp_stream_write_obj (*mp_fun_table.stream_write_obj)
#define mp_const_none ((mp_obj_t)mp_fun_table.const_none)
#define mp_const_false ((mp_obj_t)mp_fun_table.const_false)
#define mp_const_true ((mp_obj_t)mp_fun_table.const_true)
#define mp_const_empty_tuple (mp_fun_table.new_tuple(0, NULL))
#define mp_obj_new_bool(b) ((b) ? (mp_obj_t)mp_fun_table.const_true : (mp_obj_t)mp_fun_table.const_false)
#define mp_obj_new_int(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_INT))
#define mp_obj_new_int_from_uint(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_UINT))
#define mp_obj_new_str(data, len) (mp_fun_table.obj_new_str((data), (len)))
#define mp_obj_new_str_of_type(t, d, l) (mp_obj_new_str_of_type_dyn((t), (d), (l)))
#define mp_obj_new_bytes(data, len) (mp_fun_table.obj_new_bytes((data), (len)))
#define mp_obj_new_bytearray_by_ref(n, i) (mp_fun_table.obj_new_bytearray_by_ref((n), (i)))
#define mp_obj_new_tuple(n, items) (mp_fun_table.new_tuple((n), (items)))
#define mp_obj_new_list(n, items) (mp_fun_table.new_list((n), (items)))
#define mp_obj_get_type(o) (mp_fun_table.obj_get_type((o)))
#define mp_obj_cast_to_native_base(o, t) (mp_obj_cast_to_native_base_dyn((o), (t)))
#define mp_obj_get_int(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_INT))
#define mp_obj_get_int_truncated(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_UINT))
#define mp_obj_str_get_str(s) (mp_obj_str_get_data_dyn((s), NULL))
#define mp_obj_str_get_data(o, len) (mp_obj_str_get_data_dyn((o), (len)))
#define mp_get_buffer_raise(o, bufinfo, fl) (mp_fun_table.get_buffer_raise((o), (bufinfo), (fl)))
#define mp_get_stream_raise(s, flags) (mp_fun_table.get_stream_raise((s), (flags)))
#define mp_obj_len(o) (mp_obj_len_dyn(o))
#define mp_obj_subscr(base, index, val) (mp_fun_table.obj_subscr((base), (index), (val)))
#define mp_obj_get_array(o, len, items) (mp_obj_get_array_dyn((o), (len), (items)))
#define mp_obj_list_append(list, item) (mp_fun_table.list_append((list), (item)))
static inline mp_obj_t mp_obj_new_str_of_type_dyn(const mp_obj_type_t *type, const byte *data, size_t len) {
if (type == &mp_type_str) {
return mp_obj_new_str((const char *)data, len);
} else {
return mp_obj_new_bytes(data, len);
}
}
static inline mp_obj_t mp_obj_cast_to_native_base_dyn(mp_obj_t self_in, mp_const_obj_t native_type) {
const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
if (MP_OBJ_FROM_PTR(self_type) == native_type) {
return self_in;
} else if (self_type->parent != native_type) {
// The self_in object is not a direct descendant of native_type, so fail the cast.
// This is a very simple version of mp_obj_is_subclass_fast that could be improved.
return MP_OBJ_NULL;
} else {
mp_obj_instance_t *self = (mp_obj_instance_t *)MP_OBJ_TO_PTR(self_in);
return self->subobj[0];
}
}
static inline void *mp_obj_str_get_data_dyn(mp_obj_t o, size_t *l) {
mp_buffer_info_t bufinfo;
mp_get_buffer_raise(o, &bufinfo, MP_BUFFER_READ);
if (l != NULL) {
*l = bufinfo.len;
}
return bufinfo.buf;
}
static inline mp_obj_t mp_obj_len_dyn(mp_obj_t o) {
// If bytes implemented MP_UNARY_OP_LEN could use: mp_unary_op(MP_UNARY_OP_LEN, o)
return mp_fun_table.call_function_n_kw(mp_fun_table.load_name(MP_QSTR_len), 1, &o);
}
/******************************************************************************/
// General runtime functions
#define mp_load_name(qst) (mp_fun_table.load_name((qst)))
#define mp_load_global(qst) (mp_fun_table.load_global((qst)))
#define mp_load_attr(base, attr) (mp_fun_table.load_attr((base), (attr)))
#define mp_load_method(base, attr, dest) (mp_fun_table.load_method((base), (attr), (dest)))
#define mp_load_super_method(attr, dest) (mp_fun_table.load_super_method((attr), (dest)))
#define mp_store_name(qst, obj) (mp_fun_table.store_name((qst), (obj)))
#define mp_store_global(qst, obj) (mp_fun_table.store_global((qst), (obj)))
#define mp_store_attr(base, attr, val) (mp_fun_table.store_attr((base), (attr), (val)))
#define mp_unary_op(op, obj) (mp_fun_table.unary_op((op), (obj)))
#define mp_binary_op(op, lhs, rhs) (mp_fun_table.binary_op((op), (lhs), (rhs)))
#define mp_make_function_from_raw_code(rc, def_args, def_kw_args) \
(mp_fun_table.make_function_from_raw_code((rc), (def_args), (def_kw_args)))
#define mp_call_function_n_kw(fun, n_args, n_kw, args) \
(mp_fun_table.call_function_n_kw((fun), (n_args) | ((n_kw) << 8), args))
#define mp_arg_check_num(n_args, n_kw, n_args_min, n_args_max, takes_kw) \
(mp_fun_table.arg_check_num_sig((n_args), (n_kw), MP_OBJ_FUN_MAKE_SIG((n_args_min), (n_args_max), (takes_kw))))
#define MP_DYNRUNTIME_INIT_ENTRY \
mp_obj_t old_globals = mp_fun_table.swap_globals(self->globals); \
mp_raw_code_t rc; \
rc.kind = MP_CODE_NATIVE_VIPER; \
rc.scope_flags = 0; \
rc.const_table = (void *)self->const_table; \
(void)rc;
#define MP_DYNRUNTIME_INIT_EXIT \
mp_fun_table.swap_globals(old_globals); \
return mp_const_none;
#define MP_DYNRUNTIME_MAKE_FUNCTION(f) \
(mp_make_function_from_raw_code((rc.fun_data = (f), &rc), MP_OBJ_NULL, MP_OBJ_NULL))
#define mp_import_name(name, fromlist, level) \
(mp_fun_table.import_name((name), (fromlist), (level)))
#define mp_import_from(module, name) \
(mp_fun_table.import_from((module), (name)))
#define mp_import_all(module) \
(mp_fun_table.import_all((module)))
/******************************************************************************/
// Exceptions
#define mp_obj_new_exception(o) ((mp_obj_t)(o)) // Assumes returned object will be raised, will create instance then
#define mp_obj_new_exception_arg1(e_type, arg) (mp_obj_new_exception_arg1_dyn((e_type), (arg)))
#define nlr_raise(o) (mp_raise_dyn(o))
#define mp_raise_type_arg(type, arg) (mp_raise_dyn(mp_obj_new_exception_arg1_dyn((type), (arg))))
#define mp_raise_msg(type, msg) (mp_fun_table.raise_msg((type), (msg)))
#define mp_raise_OSError(er) (mp_raise_OSError_dyn(er))
#define mp_raise_NotImplementedError(msg) (mp_raise_msg(&mp_type_NotImplementedError, (msg)))
#define mp_raise_TypeError(msg) (mp_raise_msg(&mp_type_TypeError, (msg)))
#define mp_raise_ValueError(msg) (mp_raise_msg(&mp_type_ValueError, (msg)))
static inline mp_obj_t mp_obj_new_exception_arg1_dyn(const mp_obj_type_t *exc_type, mp_obj_t arg) {
mp_obj_t args[1] = { arg };
return mp_call_function_n_kw(MP_OBJ_FROM_PTR(exc_type), 1, 0, &args[0]);
}
static NORETURN inline void mp_raise_dyn(mp_obj_t o) {
mp_fun_table.raise(o);
for (;;) {
}
}
static inline void mp_raise_OSError_dyn(int er) {
mp_obj_t args[1] = { MP_OBJ_NEW_SMALL_INT(er) };
nlr_raise(mp_call_function_n_kw(mp_load_global(MP_QSTR_OSError), 1, 0, &args[0]));
}
/******************************************************************************/
// Floating point
#define mp_obj_new_float_from_f(f) (mp_fun_table.obj_new_float_from_f((f)))
#define mp_obj_new_float_from_d(d) (mp_fun_table.obj_new_float_from_d((d)))
#define mp_obj_get_float_to_f(o) (mp_fun_table.obj_get_float_to_f((o)))
#define mp_obj_get_float_to_d(o) (mp_fun_table.obj_get_float_to_d((o)))
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
#define mp_obj_new_float(f) (mp_obj_new_float_from_f((f)))
#define mp_obj_get_float(o) (mp_obj_get_float_to_f((o)))
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
#define mp_obj_new_float(f) (mp_obj_new_float_from_d((f)))
#define mp_obj_get_float(o) (mp_obj_get_float_to_d((o)))
#endif
/******************************************************************************/
// Inline function definitions.
// *items may point inside a GC block
static inline void mp_obj_get_array_dyn(mp_obj_t o, size_t *len, mp_obj_t **items) {
const mp_obj_type_t *type = mp_obj_get_type(o);
if (type == &mp_type_tuple) {
mp_obj_tuple_t *t = MP_OBJ_TO_PTR(o);
*len = t->len;
*items = &t->items[0];
} else if (type == &mp_type_list) {
mp_obj_list_t *l = MP_OBJ_TO_PTR(o);
*len = l->len;
*items = l->items;
} else {
mp_raise_TypeError("expected tuple/list");
}
}
#endif // MICROPY_INCLUDED_PY_DYNRUNTIME_H
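
dynruntime.h is the new header that lets native .mpy modules call into the runtime through mp_fun_table instead of linking against it directly. A minimal module sketch loosely following MicroPython's natmod examples; note that MP_DEFINE_CONST_FUN_OBJ_1 and the MP_QSTR_factorial qstr come from the regular py/ headers and the module build, not from this header, so treat the names as assumptions.

/* Minimal dynamic native module using the API above. */
#include "py/dynruntime.h"

STATIC mp_obj_t factorial(mp_obj_t n_obj) {
    mp_int_t n = mp_obj_get_int(n_obj);
    mp_int_t result = 1;
    for (; n > 1; --n) {
        result *= n;
    }
    return mp_obj_new_int(result);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(factorial_obj, factorial);

// Entry point called when the .mpy is imported.
mp_obj_t mpy_init(mp_obj_fun_bc_t *self, size_t n_args, size_t n_kw, mp_obj_t *args) {
    MP_DYNRUNTIME_INIT_ENTRY
    mp_store_global(MP_QSTR_factorial, MP_OBJ_FROM_PTR(&factorial_obj));
    MP_DYNRUNTIME_INIT_EXIT
}

Such a module is built out of tree with the dynruntime.mk fragment that follows: a small Makefile sets MPY_DIR, MOD, SRC and ARCH and then includes $(MPY_DIR)/py/dynruntime.mk to produce the final .mpy.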

python/src/py/dynruntime.mk Normal file
View File

@@ -0,0 +1,145 @@
# Makefile fragment for generating native .mpy files from C source
# MPY_DIR must be set to the top of the MicroPython source tree
BUILD ?= build
ECHO = @echo
RM = /bin/rm
MKDIR = /bin/mkdir
PYTHON = python3
MPY_CROSS = $(MPY_DIR)/mpy-cross/mpy-cross
MPY_TOOL = $(PYTHON) $(MPY_DIR)/tools/mpy-tool.py
MPY_LD = $(PYTHON) $(MPY_DIR)/tools/mpy_ld.py
Q = @
ifeq ("$(origin V)", "command line")
ifeq ($(V),1)
Q =
MPY_LD += '-vvv'
endif
endif
ARCH_UPPER = $(shell echo $(ARCH) | tr '[:lower:]' '[:upper:]')
CONFIG_H = $(BUILD)/$(MOD).config.h
CFLAGS += -I. -I$(MPY_DIR)
CFLAGS += -std=c99
CFLAGS += -Os
CFLAGS += -Wall -Werror -DNDEBUG
CFLAGS += -DNO_QSTR
CFLAGS += -DMICROPY_ENABLE_DYNRUNTIME
CFLAGS += -DMP_CONFIGFILE='<$(CONFIG_H)>'
CFLAGS += -fpic -fno-common
CFLAGS += -U _FORTIFY_SOURCE # prevent use of __*_chk libc functions
#CFLAGS += -fdata-sections -ffunction-sections
MPY_CROSS_FLAGS += -march=$(ARCH)
SRC_O += $(addprefix $(BUILD)/, $(patsubst %.c,%.o,$(filter %.c,$(SRC))))
SRC_MPY += $(addprefix $(BUILD)/, $(patsubst %.py,%.mpy,$(filter %.py,$(SRC))))
################################################################################
# Architecture configuration
ifeq ($(ARCH),x86)
# x86
CROSS =
CFLAGS += -m32 -fno-stack-protector
MPY_CROSS_FLAGS += -mcache-lookup-bc
MICROPY_FLOAT_IMPL ?= double
else ifeq ($(ARCH),x64)
# x64
CROSS =
CFLAGS += -fno-stack-protector
MPY_CROSS_FLAGS += -mcache-lookup-bc
MICROPY_FLOAT_IMPL ?= double
else ifeq ($(ARCH),armv7m)
# thumb
CROSS = arm-none-eabi-
CFLAGS += -mthumb -mcpu=cortex-m3
MICROPY_FLOAT_IMPL ?= none
else ifeq ($(ARCH),armv7emsp)
# thumb
CROSS = arm-none-eabi-
CFLAGS += -mthumb -mcpu=cortex-m4
CFLAGS += -mfpu=fpv4-sp-d16 -mfloat-abi=hard
MICROPY_FLOAT_IMPL ?= float
else ifeq ($(ARCH),armv7emdp)
# thumb
CROSS = arm-none-eabi-
CFLAGS += -mthumb -mcpu=cortex-m7
CFLAGS += -mfpu=fpv5-d16 -mfloat-abi=hard
MICROPY_FLOAT_IMPL ?= double
else ifeq ($(ARCH),xtensa)
# xtensa
CROSS = xtensa-lx106-elf-
CFLAGS += -mforce-l32
MICROPY_FLOAT_IMPL ?= none
else ifeq ($(ARCH),xtensawin)
# xtensawin
CROSS = xtensa-esp32-elf-
CFLAGS +=
MICROPY_FLOAT_IMPL ?= float
else
$(error architecture '$(ARCH)' not supported)
endif
MICROPY_FLOAT_IMPL_UPPER = $(shell echo $(MICROPY_FLOAT_IMPL) | tr '[:lower:]' '[:upper:]')
CFLAGS += -DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_$(MICROPY_FLOAT_IMPL_UPPER)
CFLAGS += $(CFLAGS_EXTRA)
################################################################################
# Build rules
.PHONY: all clean
all: $(MOD).mpy
clean:
$(RM) -rf $(BUILD) $(CLEAN_EXTRA)
# Create build destination directories first
BUILD_DIRS = $(sort $(dir $(CONFIG_H) $(SRC_O) $(SRC_MPY)))
$(CONFIG_H) $(SRC_O) $(SRC_MPY): | $(BUILD_DIRS)
$(BUILD_DIRS):
$(Q)$(MKDIR) -p $@
# Preprocess all source files to generate $(CONFIG_H)
$(CONFIG_H): $(SRC)
$(ECHO) "GEN $@"
$(Q)$(MPY_LD) --arch $(ARCH) --preprocess -o $@ $^
# Build .o from .c source files
$(BUILD)/%.o: %.c $(CONFIG_H) Makefile
$(ECHO) "CC $<"
$(Q)$(CROSS)gcc $(CFLAGS) -o $@ -c $<
# Build .mpy from .py source files
$(BUILD)/%.mpy: %.py
$(ECHO) "MPY $<"
$(Q)$(MPY_CROSS) $(MPY_CROSS_FLAGS) -o $@ $<
# Build native .mpy from object files
$(BUILD)/$(MOD).native.mpy: $(SRC_O)
$(ECHO) "LINK $<"
$(Q)$(MPY_LD) --arch $(ARCH) --qstrs $(CONFIG_H) -o $@ $^
# Build final .mpy from all intermediate .mpy files
$(MOD).mpy: $(BUILD)/$(MOD).native.mpy $(SRC_MPY)
$(ECHO) "GEN $@"
$(Q)$(MPY_TOOL) --merge -o $@ $^

View File

@@ -99,7 +99,7 @@ typedef struct _mp_emit_method_table_id_ops_t {
typedef struct _emit_method_table_t {
#if MICROPY_DYNAMIC_COMPILER
emit_t *(*emit_new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
emit_t *(*emit_new)(mp_obj_t * error_slot, uint *label_slot, mp_uint_t max_num_labels);
void (*emit_free)(emit_t *emit);
#endif
@@ -188,7 +188,7 @@ emit_t *emit_native_arm_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t ma
emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
emit_t *emit_native_xtensawin_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
void emit_bc_set_max_num_labels(emit_t* emit, mp_uint_t max_num_labels);
void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels);
void emit_bc_free(emit_t *emit);
void emit_native_x64_free(emit_t *emit);

View File

@@ -36,7 +36,7 @@
#if MICROPY_ENABLE_COMPILER
#define BYTES_FOR_INT ((BYTES_PER_WORD * 8 + 6) / 7)
#define BYTES_FOR_INT ((MP_BYTES_PER_OBJ_WORD * 8 + 6) / 7)
#define DUMMY_DATA_SIZE (BYTES_FOR_INT)
struct _emit_t {
@@ -110,7 +110,6 @@ STATIC void emit_write_uint(emit_t *emit, emit_allocator_t allocator, mp_uint_t
// all functions must go through this one to emit code info
STATIC byte *emit_get_cur_to_write_code_info(emit_t *emit, int num_bytes_to_write) {
//printf("emit %d\n", num_bytes_to_write);
if (emit->pass < MP_PASS_EMIT) {
emit->code_info_offset += num_bytes_to_write;
return emit->dummy_data;
@@ -122,7 +121,7 @@ STATIC byte *emit_get_cur_to_write_code_info(emit_t *emit, int num_bytes_to_writ
}
}
STATIC void emit_write_code_info_byte(emit_t* emit, byte val) {
STATIC void emit_write_code_info_byte(emit_t *emit, byte val) {
*emit_get_cur_to_write_code_info(emit, 1) = val;
}
@@ -140,7 +139,6 @@ STATIC void emit_write_code_info_qstr(emit_t *emit, qstr qst) {
#if MICROPY_ENABLE_SOURCE_LINE
STATIC void emit_write_code_info_bytes_lines(emit_t *emit, mp_uint_t bytes_to_skip, mp_uint_t lines_to_skip) {
assert(bytes_to_skip > 0 || lines_to_skip > 0);
//printf(" %d %d\n", bytes_to_skip, lines_to_skip);
while (bytes_to_skip > 0 || lines_to_skip > 0) {
mp_uint_t b, l;
if (lines_to_skip <= 6 || bytes_to_skip > 0xf) {
@@ -169,7 +167,6 @@ STATIC void emit_write_code_info_bytes_lines(emit_t *emit, mp_uint_t bytes_to_sk
// all functions must go through this one to emit byte code
STATIC byte *emit_get_cur_to_write_bytecode(emit_t *emit, int num_bytes_to_write) {
//printf("emit %d\n", num_bytes_to_write);
if (emit->pass < MP_PASS_EMIT) {
emit->bytecode_offset += num_bytes_to_write;
return emit->dummy_data;
@@ -233,7 +230,7 @@ STATIC void emit_write_bytecode_byte_const(emit_t *emit, int stack_adj, byte b,
}
#endif
STATIC void emit_write_bytecode_byte_qstr(emit_t* emit, int stack_adj, byte b, qstr qst) {
STATIC void emit_write_bytecode_byte_qstr(emit_t *emit, int stack_adj, byte b, qstr qst) {
#if MICROPY_PERSISTENT_CODE
assert((qst >> 16) == 0);
mp_emit_bc_adjust_stack_size(emit, stack_adj);
@@ -255,7 +252,7 @@ STATIC void emit_write_bytecode_byte_obj(emit_t *emit, int stack_adj, byte b, mp
// aligns the pointer so it is friendly to GC
emit_write_bytecode_byte(emit, stack_adj, b);
emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(mp_obj_t));
mp_obj_t *c = (mp_obj_t*)emit_get_cur_to_write_bytecode(emit, sizeof(mp_obj_t));
mp_obj_t *c = (mp_obj_t *)emit_get_cur_to_write_bytecode(emit, sizeof(mp_obj_t));
// Verify that c is already uint-aligned
assert(c == MP_ALIGN(c, sizeof(mp_obj_t)));
*c = obj;
@@ -270,10 +267,10 @@ STATIC void emit_write_bytecode_byte_raw_code(emit_t *emit, int stack_adj, byte
#else
// aligns the pointer so it is friendly to GC
emit_write_bytecode_byte(emit, stack_adj, b);
emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(void*));
void **c = (void**)emit_get_cur_to_write_bytecode(emit, sizeof(void*));
emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(void *));
void **c = (void **)emit_get_cur_to_write_bytecode(emit, sizeof(void *));
// Verify that c is already uint-aligned
assert(c == MP_ALIGN(c, sizeof(void*)));
assert(c == MP_ALIGN(c, sizeof(void *)));
*c = rc;
#endif
#if MICROPY_PY_SYS_SETTRACE
@@ -470,8 +467,7 @@ void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta) {
}
void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
//printf("source: line %d -> %d offset %d -> %d\n", emit->last_source_line, source_line, emit->last_source_line_offset, emit->bytecode_offset);
#if MICROPY_ENABLE_SOURCE_LINE
#if MICROPY_ENABLE_SOURCE_LINE
if (MP_STATE_VM(mp_optimise_value) >= 3) {
// If we compile with -O3, don't store line numbers.
return;
@@ -483,10 +479,10 @@ void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) {
emit->last_source_line_offset = emit->bytecode_offset;
emit->last_source_line = source_line;
}
#else
#else
(void)emit;
(void)source_line;
#endif
#endif
}
void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l) {
@@ -944,4 +940,4 @@ const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops = {
};
#endif
#endif //MICROPY_ENABLE_COMPILER
#endif // MICROPY_ENABLE_COMPILER

View File

@@ -84,17 +84,17 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code,
mp_prof_extract_prelude(code, prelude);
#endif
#ifdef DEBUG_PRINT
#ifdef DEBUG_PRINT
#if !MICROPY_DEBUG_PRINTERS
const size_t len = 0;
#endif
DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags);
#endif
#if MICROPY_DEBUG_PRINTERS
#endif
#if MICROPY_DEBUG_PRINTERS
if (mp_verbose_flag >= 2) {
mp_bytecode_print(rc, code, len, const_table);
mp_bytecode_print(&mp_plat_print, rc, code, len, const_table);
}
#endif
#endif
}
#if MICROPY_EMIT_MACHINE_CODE
@@ -108,6 +108,31 @@ void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void
assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);
// Some architectures require flushing/invalidation of the I/D caches,
// so that the generated native code which was created in data RAM will
// be available for execution from instruction RAM.
#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
#if __ICACHE_PRESENT == 1
// Flush D-cache, so the code emitted is stored in RAM.
MP_HAL_CLEAN_DCACHE(fun_data, fun_len);
// Invalidate I-cache, so the newly-created code is reloaded from RAM.
SCB_InvalidateICache();
#endif
#elif MICROPY_EMIT_ARM
#if (defined(__linux__) && defined(__GNUC__)) || __ARM_ARCH == 7
__builtin___clear_cache(fun_data, (uint8_t *)fun_data + fun_len);
#elif defined(__arm__)
// Flush I-cache and D-cache.
asm volatile (
"0:"
"mrc p15, 0, r15, c7, c10, 3\n" // test and clean D-cache
"bne 0b\n"
"mov r0, #0\n"
"mcr p15, 0, r0, c7, c7, 0\n" // invalidate I-cache and D-cache
: : : "r0", "cc");
#endif
#endif
rc->kind = kind;
rc->scope_flags = scope_flags;
rc->n_pos_args = n_pos_args;
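The comment at the top of this new block is the key point: natively emitted code is written into ordinary data RAM, so on targets with separate instruction and data caches the D-cache must be cleaned and the I-cache invalidated before the buffer is executed. A minimal, hedged sketch of the same idea using only the compiler builtin that the ARM branch above already relies on (the helper name is illustrative, not part of emitglue.c):

#include <stddef.h>

// Synchronise a freshly written code buffer before jumping into it.
// On GCC/Clang ARM targets __builtin___clear_cache performs the required
// D-cache clean and I-cache invalidate for the given range; hosted builds
// executing from normal RAM need no maintenance.
static void flush_emitted_code(void *buf, size_t len) {
#if defined(__GNUC__) && defined(__arm__)
    __builtin___clear_cache((char *)buf, (char *)buf + len);
#else
    (void)buf;
    (void)len;
#endif
}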
@@ -120,28 +145,28 @@ void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void
rc->prelude_offset = prelude_offset;
rc->n_obj = n_obj;
rc->n_raw_code = n_raw_code;
rc->n_qstr= n_qstr;
rc->n_qstr = n_qstr;
rc->qstr_link = qstr_link;
#endif
#ifdef DEBUG_PRINT
#ifdef DEBUG_PRINT
DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, n_pos_args, (uint)scope_flags);
for (mp_uint_t i = 0; i < fun_len; i++) {
if (i > 0 && i % 16 == 0) {
DEBUG_printf("\n");
}
DEBUG_printf(" %02x", ((byte*)fun_data)[i]);
DEBUG_printf(" %02x", ((byte *)fun_data)[i]);
}
DEBUG_printf("\n");
#ifdef WRITE_CODE
#ifdef WRITE_CODE
FILE *fp_write_code = fopen("out-code", "wb");
fwrite(fun_data, fun_len, 1, fp_write_code);
fclose(fp_write_code);
#endif
#else
#endif
#else
(void)fun_len;
#endif
#endif
}
#endif
@@ -164,7 +189,7 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar
fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->fun_data, rc->const_table);
// Check for a generator function, and if so change the type of the object
if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_native_gen_wrap;
((mp_obj_base_t *)MP_OBJ_TO_PTR(fun))->type = &mp_type_native_gen_wrap;
}
break;
#endif
@@ -179,7 +204,7 @@ mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_ar
fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->fun_data, rc->const_table);
// check for generator functions and if so change the type of the object
if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
((mp_obj_base_t*)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap;
((mp_obj_base_t *)MP_OBJ_TO_PTR(fun))->type = &mp_type_gen_wrap;
}
#if MICROPY_PY_SYS_SETTRACE

View File

@@ -39,14 +39,14 @@ typedef enum {
// define rules with a compile function
#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
#define DEF_RULE_NC(rule, kind, ...)
#include "py/grammar.h"
#include "py/grammar.h"
#undef DEF_RULE
#undef DEF_RULE_NC
PN_const_object, // special node for a constant, generic Python object
// define rules without a compile function
#define DEF_RULE(rule, comp, kind, ...)
#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
#include "py/grammar.h"
#include "py/grammar.h"
#undef DEF_RULE
#undef DEF_RULE_NC
} pn_kind_t;
@@ -59,7 +59,7 @@ struct _emit_inline_asm_t {
qstr *label_lookup;
};
STATIC void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, const char *msg) {
STATIC void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
*emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
}
@@ -99,17 +99,17 @@ STATIC void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_s
STATIC mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
if (n_params > 4) {
emit_inline_thumb_error_msg(emit, "can only have up to 4 parameters to Thumb assembly");
emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Thumb assembly"));
return 0;
}
for (mp_uint_t i = 0; i < n_params; i++) {
if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
emit_inline_thumb_error_msg(emit, "parameters must be registers in sequence r0 to r3");
emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
return 0;
}
const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
if (!(strlen(p) == 2 && p[0] == 'r' && p[1] == '0' + i)) {
emit_inline_thumb_error_msg(emit, "parameters must be registers in sequence r0 to r3");
if (!(strlen(p) == 2 && p[0] == 'r' && (mp_uint_t)p[1] == '0' + i)) {
emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
return 0;
}
}
@@ -131,7 +131,9 @@ STATIC bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num
return true;
}
typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t;
typedef struct _reg_name_t { byte reg;
byte name[3];
} reg_name_t;
STATIC const reg_name_t reg_name_table[] = {
{0, "r0\0"},
{1, "r1\0"},
@@ -157,7 +159,9 @@ STATIC const reg_name_t reg_name_table[] = {
};
#define MAX_SPECIAL_REGISTER_NAME_LENGTH 7
typedef struct _special_reg_name_t { byte reg; char name[MAX_SPECIAL_REGISTER_NAME_LENGTH + 1]; } special_reg_name_t;
typedef struct _special_reg_name_t { byte reg;
char name[MAX_SPECIAL_REGISTER_NAME_LENGTH + 1];
} special_reg_name_t;
STATIC const special_reg_name_t special_reg_name_table[] = {
{5, "IPSR"},
{17, "BASEPRI"},
@@ -185,7 +189,7 @@ STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_n
if (r->reg > max_reg) {
emit_inline_thumb_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects at most r%d", op, max_reg));
MP_ERROR_TEXT("'%s' expects at most r%d"), op, max_reg));
return 0;
} else {
return r->reg;
@@ -194,7 +198,7 @@ STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_n
}
emit_inline_thumb_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects a register", op));
MP_ERROR_TEXT("'%s' expects a register"), op));
return 0;
}
@@ -208,7 +212,7 @@ STATIC mp_uint_t get_arg_special_reg(emit_inline_asm_t *emit, const char *op, mp
}
emit_inline_thumb_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects a special register", op));
MP_ERROR_TEXT("'%s' expects a special register"), op));
return 0;
}
@@ -226,8 +230,8 @@ STATIC mp_uint_t get_arg_vfpreg(emit_inline_asm_t *emit, const char *op, mp_pars
}
if (regno > 31) {
emit_inline_thumb_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects at most r%d", op, 31));
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
MP_ERROR_TEXT("'%s' expects at most r%d"), op, 31));
return 0;
} else {
return regno;
@@ -235,8 +239,8 @@ STATIC mp_uint_t get_arg_vfpreg(emit_inline_asm_t *emit, const char *op, mp_pars
}
malformed:
emit_inline_thumb_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects an FPU register", op));
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
MP_ERROR_TEXT("'%s' expects an FPU register"), op));
return 0;
}
#endif
@@ -248,7 +252,7 @@ STATIC mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_par
goto bad_arg;
}
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 1); // should always be
pn = pns->nodes[0];
@@ -258,10 +262,10 @@ STATIC mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_par
// set with one element
reglist |= 1 << get_arg_reg(emit, op, pn, 15);
} else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
pns = (mp_parse_node_struct_t*)pn;
pns = (mp_parse_node_struct_t *)pn;
if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t*)pns->nodes[1];
mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
// set with multiple elements
@@ -289,19 +293,19 @@ STATIC mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_par
return reglist;
bad_arg:
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects {r0, r1, ...}", op));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects {r0, r1, ...}"), op));
return 0;
}
STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, uint32_t fit_mask) {
mp_obj_t o;
if (!mp_parse_node_get_int_maybe(pn, &o)) {
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an integer", op));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
return 0;
}
uint32_t i = mp_obj_get_int_truncated(o);
if ((i & (~fit_mask)) != 0) {
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer 0x%x doesn't fit in mask 0x%x", op, i, fit_mask));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer 0x%x doesn't fit in mask 0x%x"), op, i, fit_mask));
return 0;
}
return i;
@@ -311,11 +315,11 @@ STATIC bool get_arg_addr(emit_inline_asm_t *emit, const char *op, mp_parse_node_
if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_bracket)) {
goto bad_arg;
}
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
goto bad_arg;
}
pns = (mp_parse_node_struct_t*)pns->nodes[0];
pns = (mp_parse_node_struct_t *)pns->nodes[0];
if (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) != 2) {
goto bad_arg;
}
@@ -325,13 +329,13 @@ STATIC bool get_arg_addr(emit_inline_asm_t *emit, const char *op, mp_parse_node_
return true;
bad_arg:
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an address of the form [a, b]", op));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an address of the form [a, b]"), op));
return false;
}
STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
if (!MP_PARSE_NODE_IS_ID(pn)) {
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects a label", op));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
return 0;
}
qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
@@ -342,12 +346,14 @@ STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_
}
// only need to have the labels on the last pass
if (emit->pass == MP_PASS_EMIT) {
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "label '%q' not defined", label_qstr));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
}
return 0;
}
typedef struct _cc_name_t { byte cc; byte name[2]; } cc_name_t;
typedef struct _cc_name_t { byte cc;
byte name[2];
} cc_name_t;
STATIC const cc_name_t cc_name_table[] = {
{ ASM_THUMB_CC_EQ, "eq" },
{ ASM_THUMB_CC_NE, "ne" },
@@ -365,7 +371,9 @@ STATIC const cc_name_t cc_name_table[] = {
{ ASM_THUMB_CC_LE, "le" },
};
typedef struct _format_4_op_t { byte op; char name[3]; } format_4_op_t;
typedef struct _format_4_op_t { byte op;
char name[3];
} format_4_op_t;
#define X(x) (((x) >> 4) & 0xff) // only need 1 byte to distinguish these ops
STATIC const format_4_op_t format_4_op_table[] = {
{ X(ASM_THUMB_FORMAT_4_EOR), "eor" },
@@ -387,7 +395,9 @@ STATIC const format_4_op_t format_4_op_table[] = {
#undef X
// name is actually a qstr, which should fit in 16 bits
typedef struct _format_9_10_op_t { uint16_t op; uint16_t name; } format_9_10_op_t;
typedef struct _format_9_10_op_t { uint16_t op;
uint16_t name;
} format_9_10_op_t;
#define X(x) (x)
STATIC const format_9_10_op_t format_9_10_op_table[] = {
{ X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_ldr },
@@ -401,7 +411,9 @@ STATIC const format_9_10_op_t format_9_10_op_table[] = {
#if MICROPY_EMIT_INLINE_THUMB_FLOAT
// actual opcodes are: 0xee00 | op.hi_nibble, 0x0a00 | op.lo_nibble
typedef struct _format_vfp_op_t { byte op; char name[3]; } format_vfp_op_t;
typedef struct _format_vfp_op_t { byte op;
char name[3];
} format_vfp_op_t;
STATIC const format_vfp_op_t format_vfp_op_table[] = {
{ 0x30, "add" },
{ 0x34, "sub" },
@@ -425,7 +437,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
// "subs", RLO, RLO, I3, asm_thumb_subs_reg_reg_i3
size_t op_len;
const char *op_str = (const char*)qstr_data(op, &op_len);
const char *op_str = (const char *)qstr_data(op, &op_len);
#if MICROPY_EMIT_INLINE_THUMB_FLOAT
if (op_str[0] == 'v') {
@@ -434,7 +446,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
mp_uint_t op_code = 0x0ac0, op_code_hi;
if (op == MP_QSTR_vcmp) {
op_code_hi = 0xeeb4;
op_vfp_twoargs:;
op_vfp_twoargs:;
mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
asm_thumb_op32(&emit->as,
@@ -485,7 +497,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
0x0a10 | (r_arm << 12) | ((vm & 1) << 7));
} else if (op == MP_QSTR_vldr) {
op_code_hi = 0xed90;
op_vldr_vstr:;
op_vldr_vstr:;
mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
mp_parse_node_t pn_base, pn_offset;
if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
@@ -521,8 +533,10 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
} else {
goto unknown_op;
}
} else
return;
}
#endif
if (n_args == 0) {
if (op == MP_QSTR_nop) {
asm_thumb_op16(&emit->as, ASM_THUMB_OP_NOP);
@@ -547,8 +561,8 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
mp_uint_t r = get_arg_reg(emit, op_str, pn_args[0], 15);
asm_thumb_op16(&emit->as, 0x4700 | (r << 3));
} else if (op_str[0] == 'b' && (op_len == 3
|| (op_len == 5 && op_str[3] == '_'
&& (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) {
|| (op_len == 5 && op_str[3] == '_'
&& (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) {
mp_uint_t cc = -1;
for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
if (op_str[1] == cc_name_table[i].name[0] && op_str[2] == cc_name_table[i].name[1]) {
@@ -559,7 +573,11 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
goto unknown_op;
}
int label_num = get_arg_label(emit, op_str, pn_args[0]);
if (!asm_thumb_bcc_nw_label(&emit->as, cc, label_num, op_len == 5 && op_str[4] == 'w')) {
bool wide = op_len == 5 && op_str[4] == 'w';
if (wide && !ARMV7M) {
goto unknown_op;
}
if (!asm_thumb_bcc_nw_label(&emit->as, cc, label_num, wide)) {
goto branch_not_in_range;
}
} else if (ARMV7M && op_str[0] == 'i' && op_str[1] == 't') {
@@ -637,7 +655,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
op_code_hi = 0xfab0;
op_code = 0xf080;
mp_uint_t rd, rm;
op_clz_rbit:
op_clz_rbit:
rd = get_arg_reg(emit, op_str, pn_args[0], 15);
rm = get_arg_reg(emit, op_str, pn_args[1], 15);
asm_thumb_op32(&emit->as, op_code_hi | rm, op_code | (rd << 8) | rm);
@@ -645,7 +663,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
op_code_hi = 0xfa90;
op_code = 0xf0a0;
goto op_clz_rbit;
} else if (ARMV7M && op == MP_QSTR_mrs){
} else if (ARMV7M && op == MP_QSTR_mrs) {
mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 12);
mp_uint_t reg_src = get_arg_special_reg(emit, op_str, pn_args[1]);
asm_thumb_op32(&emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src);
@@ -653,7 +671,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
if (op == MP_QSTR_and_) {
op_code = ASM_THUMB_FORMAT_4_AND;
mp_uint_t reg_dest, reg_src;
op_format_4:
op_format_4:
reg_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
reg_src = get_arg_reg(emit, op_str, pn_args[1], 7);
asm_thumb_format_4(&emit->as, op_code, reg_dest, reg_src);
@@ -674,7 +692,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
if (op == MP_QSTR_mov) {
op_code = ASM_THUMB_FORMAT_3_MOV;
mp_uint_t rlo_dest, i8_src;
op_format_3:
op_format_3:
rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
i8_src = get_arg_i(emit, op_str, pn_args[1], 0xff);
asm_thumb_format_3(&emit->as, op_code, rlo_dest, i8_src);
@@ -687,23 +705,24 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
} else if (op == MP_QSTR_sub) {
op_code = ASM_THUMB_FORMAT_3_SUB;
goto op_format_3;
} else if (ARMV7M && op == MP_QSTR_movw) {
#if ARMV7M
} else if (op == MP_QSTR_movw) {
op_code = ASM_THUMB_OP_MOVW;
mp_uint_t reg_dest;
op_movw_movt:
op_movw_movt:
reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
int i_src = get_arg_i(emit, op_str, pn_args[1], 0xffff);
asm_thumb_mov_reg_i16(&emit->as, op_code, reg_dest, i_src);
} else if (ARMV7M && op == MP_QSTR_movt) {
} else if (op == MP_QSTR_movt) {
op_code = ASM_THUMB_OP_MOVT;
goto op_movw_movt;
} else if (ARMV7M && op == MP_QSTR_movwt) {
} else if (op == MP_QSTR_movwt) {
// this is a convenience instruction
mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
uint32_t i_src = get_arg_i(emit, op_str, pn_args[1], 0xffffffff);
asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff);
asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff);
} else if (ARMV7M && op == MP_QSTR_ldrex) {
} else if (op == MP_QSTR_ldrex) {
mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
mp_parse_node_t pn_base, pn_offset;
if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
@@ -711,6 +730,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
asm_thumb_op32(&emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8);
}
#endif
} else {
// search table for ldr/str instructions
for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_9_10_op_table); i++) {
@@ -743,7 +763,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
if (op == MP_QSTR_lsl) {
op_code = ASM_THUMB_FORMAT_1_LSL;
mp_uint_t rlo_dest, rlo_src, i5;
op_format_1:
op_format_1:
rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
i5 = get_arg_i(emit, op_str, pn_args[2], 0x1f);
@@ -757,7 +777,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
} else if (op == MP_QSTR_add) {
op_code = ASM_THUMB_FORMAT_2_ADD;
mp_uint_t rlo_dest, rlo_src;
op_format_2:
op_format_2:
rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
int src_b;
@@ -772,7 +792,7 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
} else if (ARMV7M && op == MP_QSTR_sdiv) {
op_code = 0xfb90; // sdiv high part
mp_uint_t rd, rn, rm;
op_sdiv_udiv:
op_sdiv_udiv:
rd = get_arg_reg(emit, op_str, pn_args[0], 15);
rn = get_arg_reg(emit, op_str, pn_args[1], 15);
rm = get_arg_reg(emit, op_str, pn_args[2], 15);
@@ -803,11 +823,11 @@ STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_a
return;
unknown_op:
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "unsupported Thumb instruction '%s' with %d arguments", op_str, n_args));
emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Thumb instruction '%s' with %d arguments"), op_str, n_args));
return;
branch_not_in_range:
emit_inline_thumb_error_msg(emit, "branch not in range");
emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
return;
}
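Most of the churn in this file is the switch from plain C strings to MP_ERROR_TEXT(), which lets error messages live in ROM and be compressed when MICROPY_ROM_TEXT_COMPRESSION is enabled; the error helpers now take mp_rom_error_text_t instead of const char *. A hedged restatement of the pattern, mirroring emit_inline_thumb_error_msg above (the helper name is illustrative):

// Error strings are wrapped at the call site so the build can replace them
// with compressed ROM data; the exception constructors accept the resulting
// mp_rom_error_text_t directly.
static void report_syntax_error(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
    *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
}

// Usage: report_syntax_error(emit, MP_ERROR_TEXT("branch not in range"));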

View File

@@ -43,7 +43,7 @@ struct _emit_inline_asm_t {
qstr *label_lookup;
};
STATIC void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, const char *msg) {
STATIC void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, mp_rom_error_text_t msg) {
*emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
}
@@ -83,17 +83,17 @@ STATIC void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_
STATIC mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
if (n_params > 4) {
emit_inline_xtensa_error_msg(emit, "can only have up to 4 parameters to Xtensa assembly");
emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Xtensa assembly"));
return 0;
}
for (mp_uint_t i = 0; i < n_params; i++) {
if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5");
emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
return 0;
}
const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
if (!(strlen(p) == 2 && p[0] == 'a' && p[1] == '2' + i)) {
emit_inline_xtensa_error_msg(emit, "parameters must be registers in sequence a2 to a5");
if (!(strlen(p) == 2 && p[0] == 'a' && (mp_uint_t)p[1] == '2' + i)) {
emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
return 0;
}
}
@@ -115,7 +115,9 @@ STATIC bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_nu
return true;
}
typedef struct _reg_name_t { byte reg; byte name[3]; } reg_name_t;
typedef struct _reg_name_t { byte reg;
byte name[3];
} reg_name_t;
STATIC const reg_name_t reg_name_table[] = {
{0, "a0\0"},
{1, "a1\0"},
@@ -159,19 +161,19 @@ STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_n
}
emit_inline_xtensa_error_exc(emit,
mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
"'%s' expects a register", op));
MP_ERROR_TEXT("'%s' expects a register"), op));
return 0;
}
STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, int min, int max) {
mp_obj_t o;
if (!mp_parse_node_get_int_maybe(pn, &o)) {
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects an integer", op));
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
return 0;
}
uint32_t i = mp_obj_get_int_truncated(o);
if (min != max && ((int)i < min || (int)i > max)) {
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' integer %d isn't within range %d..%d", op, i, min, max));
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer %d isn't within range %d..%d"), op, i, min, max));
return 0;
}
return i;
@@ -179,7 +181,7 @@ STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node
STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
if (!MP_PARSE_NODE_IS_ID(pn)) {
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "'%s' expects a label", op));
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
return 0;
}
qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
@@ -190,7 +192,7 @@ STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_
}
// only need to have the labels on the last pass
if (emit->pass == MP_PASS_EMIT) {
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "label '%q' not defined", label_qstr));
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
}
return 0;
}
@@ -242,7 +244,7 @@ STATIC const opcode_table_3arg_t opcode_table_3arg[] = {
STATIC void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
size_t op_len;
const char *op_str = (const char*)qstr_data(op, &op_len);
const char *op_str = (const char *)qstr_data(op, &op_len);
if (n_args == 0) {
if (op == MP_QSTR_ret_n) {
@@ -324,12 +326,12 @@ STATIC void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_
return;
unknown_op:
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, "unsupported Xtensa instruction '%s' with %d arguments", op_str, n_args));
emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Xtensa instruction '%s' with %d arguments"), op_str, n_args));
return;
/*
branch_not_in_range:
emit_inline_xtensa_error_msg(emit, "branch not in range");
emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
return;
*/
}

View File

@@ -135,7 +135,7 @@
#define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) do { \
*emit->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \
} while (0)
} while (0)
typedef enum {
STACK_VALUE,
@@ -163,15 +163,25 @@ typedef enum {
STATIC qstr vtype_to_qstr(vtype_kind_t vtype) {
switch (vtype) {
case VTYPE_PYOBJ: return MP_QSTR_object;
case VTYPE_BOOL: return MP_QSTR_bool;
case VTYPE_INT: return MP_QSTR_int;
case VTYPE_UINT: return MP_QSTR_uint;
case VTYPE_PTR: return MP_QSTR_ptr;
case VTYPE_PTR8: return MP_QSTR_ptr8;
case VTYPE_PTR16: return MP_QSTR_ptr16;
case VTYPE_PTR32: return MP_QSTR_ptr32;
case VTYPE_PTR_NONE: default: return MP_QSTR_None;
case VTYPE_PYOBJ:
return MP_QSTR_object;
case VTYPE_BOOL:
return MP_QSTR_bool;
case VTYPE_INT:
return MP_QSTR_int;
case VTYPE_UINT:
return MP_QSTR_uint;
case VTYPE_PTR:
return MP_QSTR_ptr;
case VTYPE_PTR8:
return MP_QSTR_ptr8;
case VTYPE_PTR16:
return MP_QSTR_ptr16;
case VTYPE_PTR32:
return MP_QSTR_ptr32;
case VTYPE_PTR_NONE:
default:
return MP_QSTR_None;
}
}
@@ -201,6 +211,7 @@ struct _emit_t {
int pass;
bool do_viper_types;
bool prelude_offset_uses_u16_encoding;
mp_uint_t local_vtype_alloc;
vtype_kind_t *local_vtype;
@@ -244,7 +255,7 @@ STATIC void emit_native_global_exc_entry(emit_t *emit);
STATIC void emit_native_global_exc_exit(emit_t *emit);
STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj);
emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels) {
emit_t *EXPORT_FUN(new)(mp_obj_t * error_slot, uint *label_slot, mp_uint_t max_num_labels) {
emit_t *emit = m_new0(emit_t, 1);
emit->error_slot = error_slot;
emit->label_slot = label_slot;
@@ -257,7 +268,7 @@ emit_t *EXPORT_FUN(new)(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_nu
return emit;
}
void EXPORT_FUN(free)(emit_t *emit) {
void EXPORT_FUN(free)(emit_t * emit) {
mp_asm_base_deinit(&emit->as->base, false);
m_del_obj(ASM_T, emit->as);
m_del(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc);
@@ -329,6 +340,18 @@ STATIC void emit_native_mov_reg_qstr_obj(emit_t *emit, int reg_dest, qstr qst) {
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)
#define emit_native_mov_state_imm_fix_u16_via(emit, local_num, imm, reg_temp) \
do { \
ASM_MOV_REG_IMM_FIX_U16((emit)->as, (reg_temp), (imm)); \
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)
#define emit_native_mov_state_imm_fix_word_via(emit, local_num, imm, reg_temp) \
do { \
ASM_MOV_REG_IMM_FIX_WORD((emit)->as, (reg_temp), (imm)); \
emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
} while (false)
STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope);
@@ -539,16 +562,27 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);
// Set code_state.ip (offset from start of this function to prelude info)
int code_state_ip_local = emit->code_state_start + OFFSETOF_CODE_STATE_IP;
#if N_PRELUDE_AS_BYTES_OBJ
// Prelude is a bytes object in const_table; store ip = prelude->data - fun_bc->bytecode
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, emit->scope->num_pos_args + emit->scope->num_kwonly_args + 1);
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, offsetof(mp_obj_str_t, data) / sizeof(uintptr_t));
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_PARENT_ARG_1, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_BYTECODE);
ASM_SUB_REG_REG(emit->as, REG_LOCAL_3, REG_PARENT_ARG_1);
emit_native_mov_state_reg(emit, emit->code_state_start + OFFSETOF_CODE_STATE_IP, REG_LOCAL_3);
emit_native_mov_state_reg(emit, code_state_ip_local, REG_LOCAL_3);
#else
// TODO this encoding may change size in the final pass, need to make it fixed
emit_native_mov_state_imm_via(emit, emit->code_state_start + OFFSETOF_CODE_STATE_IP, emit->prelude_offset, REG_PARENT_ARG_1);
if (emit->pass == MP_PASS_CODE_SIZE) {
// Commit to the encoding size based on the value of prelude_offset in this pass.
// By using 32768 as the cut-off it is highly unlikely that prelude_offset will
// grow beyond 65535 by the end of this pass, and so require the larger encoding.
emit->prelude_offset_uses_u16_encoding = emit->prelude_offset < 32768;
}
if (emit->prelude_offset_uses_u16_encoding) {
assert(emit->prelude_offset <= 65535);
emit_native_mov_state_imm_fix_u16_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
} else {
emit_native_mov_state_imm_fix_word_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
}
#endif
// Set code_state.n_state (only works on little endian targets due to n_state being uint16_t)
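The new logic above resolves the old TODO: because the native emitter runs in multiple passes, the instruction that stores the prelude offset must occupy the same number of bytes in the code-size pass as in the final pass. The cut-off test can be restated as a tiny helper (hypothetical name, same reasoning as the comment above):

#include <stdbool.h>
#include <stddef.h>

// Decide during MP_PASS_CODE_SIZE whether the fixed 16-bit immediate form is
// safe: choosing the cut-off at 32768 (2^15) leaves enough headroom that the
// offset cannot grow past 65535 before the final pass, so the encoding picked
// here never has to change size later.
static inline bool prelude_offset_fits_u16(size_t offset_at_size_pass) {
    return offset_at_size_pass < 32768;
}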
@@ -733,14 +767,14 @@ STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) {
emit->scope->stack_size = emit->stack_size;
}
#ifdef DEBUG_PRINT
#ifdef DEBUG_PRINT
DEBUG_printf(" adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
}
DEBUG_printf("\n");
#endif
#endif
}
STATIC void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) {
@@ -807,10 +841,13 @@ STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
}
}
// Ensures all unsettled registers that hold Python values are copied to the
// concrete Python stack. All registers are then free to use.
STATIC void need_reg_all(emit_t *emit) {
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
si->kind = STACK_VALUE;
emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
}
@@ -831,29 +868,27 @@ STATIC vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_i
} else if (si->vtype == VTYPE_PTR_NONE) {
emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_NONE_OBJ);
} else {
mp_raise_NotImplementedError("conversion to object");
mp_raise_NotImplementedError(MP_ERROR_TEXT("conversion to object"));
}
return VTYPE_PYOBJ;
}
}
// Copies all unsettled registers and immediates that are Python values into the
// concrete Python stack. This ensures the concrete Python stack holds valid
// values for the current stack_size.
// This function may clobber REG_TEMP1.
STATIC void need_stack_settled(emit_t *emit) {
DEBUG_printf(" need_stack_settled; stack_size=%d\n", emit->stack_size);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
si->kind = STACK_VALUE;
emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
}
}
need_reg_all(emit);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_IMM) {
DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
si->kind = STACK_VALUE;
si->vtype = load_reg_stack_imm(emit, REG_TEMP0, si, false);
emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP0);
// using REG_TEMP1 to avoid clobbering REG_TEMP0 (aka REG_RET)
si->vtype = load_reg_stack_imm(emit, REG_TEMP1, si, false);
emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP1);
}
}
}
@@ -1132,8 +1167,8 @@ STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) {
bool is_finally = false;
if (emit->exc_stack_size > 0) {
exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
is_finally = e->is_finally && e->label == l;
exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
is_finally = e->is_finally && e->label == l;
}
if (is_finally) {
@@ -1405,7 +1440,7 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
DEBUG_printf("load_fast(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
vtype_kind_t vtype = emit->local_vtype[local_num];
if (vtype == VTYPE_UNBOUND) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "local '%q' used before type known", qst);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("local '%q' used before type known"), qst);
}
emit_native_pre(emit);
if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) {
@@ -1580,7 +1615,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load from '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
}
} else {
// index is not an immediate
@@ -1590,7 +1625,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load with '%q' index", vtype_to_qstr(vtype_index));
MP_ERROR_TEXT("can't load with '%q' index"), vtype_to_qstr(vtype_index));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1618,7 +1653,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't load from '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
}
}
emit_post_push_reg(emit, VTYPE_INT, REG_RET);
@@ -1642,7 +1677,7 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
} else if (emit->local_vtype[local_num] != vtype) {
// type of local is not the same as object stored in it
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"local '%q' has type '%q' but source is '%q'",
MP_ERROR_TEXT("local '%q' has type '%q' but source is '%q'"),
qst, vtype_to_qstr(emit->local_vtype[local_num]), vtype_to_qstr(vtype));
}
}
@@ -1735,7 +1770,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
int reg_index = REG_ARG_2;
int reg_value = REG_ARG_3;
emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_value);
#if N_X86
#if N_X64 || N_X86
// special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
emit_pre_pop_reg(emit, &vtype_value, reg_value);
#else
@@ -1743,7 +1778,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
#endif
if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store '%q'", vtype_to_qstr(vtype_value));
MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1809,7 +1844,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store to '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
}
} else {
// index is not an immediate
@@ -1820,9 +1855,9 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store with '%q' index", vtype_to_qstr(vtype_index));
MP_ERROR_TEXT("can't store with '%q' index"), vtype_to_qstr(vtype_index));
}
#if N_X86
#if N_X64 || N_X86
// special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
emit_pre_pop_reg(emit, &vtype_value, reg_value);
#else
@@ -1830,7 +1865,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
#endif
if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store '%q'", vtype_to_qstr(vtype_value));
MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
}
switch (vtype_base) {
case VTYPE_PTR8: {
@@ -1870,7 +1905,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
}
default:
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't store to '%q'", vtype_to_qstr(vtype_base));
MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
}
}
@@ -1992,7 +2027,7 @@ STATIC void emit_native_jump_helper(emit_t *emit, bool cond, mp_uint_t label, bo
}
if (!(vtype == VTYPE_BOOL || vtype == VTYPE_INT || vtype == VTYPE_UINT)) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't implicitly convert '%q' to 'bool'", vtype_to_qstr(vtype));
MP_ERROR_TEXT("can't implicitly convert '%q' to 'bool'"), vtype_to_qstr(vtype));
}
}
// For non-pop need to save the vtype so that emit_native_adjust_stack_size
@@ -2058,7 +2093,7 @@ STATIC void emit_native_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t exc
ASM_MOV_REG_PCREL(emit->as, REG_RET, label & ~MP_EMIT_BREAK_FROM_FOR);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_RET);
// Cancel any active exception (see also emit_native_pop_except_jump)
emit_native_mov_reg_const(emit, REG_RET, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_RET, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_RET);
// Jump to the innermost active finally
label = first_finally->label;
@@ -2153,9 +2188,8 @@ STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); // get exc
// Check if exc is None and jump to non-exc handler if it is
emit_native_mov_reg_const(emit, REG_ARG_2, MP_F_CONST_NONE_OBJ);
ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_2, *emit->label_slot + 2);
// Check if exc is MP_OBJ_NULL (i.e. zero) and jump to non-exc handler if it is
ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, *emit->label_slot + 2, false);
ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc)
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc)
@@ -2175,9 +2209,9 @@ STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
emit_call(emit, MP_F_OBJ_IS_TRUE);
ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot + 1, true);
// Replace exception with None
// Replace exception with MP_OBJ_NULL.
emit_native_label_assign(emit, *emit->label_slot);
emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
// end of with cleanup nlr_catch block
@@ -2255,7 +2289,7 @@ STATIC void emit_native_for_iter_end(emit_t *emit) {
STATIC void emit_native_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) {
if (within_exc_handler) {
// Cancel any active exception so subsequent handlers don't see it
emit_native_mov_reg_const(emit, REG_TEMP0, MP_F_CONST_NONE_OBJ);
ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
} else {
emit_native_leave_exc_stack(emit, false);
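These hunks change the "no active exception" marker from mp_const_none to MP_OBJ_NULL, the all-zero sentinel, so cancelling an exception becomes a plain immediate store and the matching check a zero test. A hedged sketch of the convention, assuming py/obj.h is included (the helper name is illustrative):

#include <stdbool.h>

// MP_OBJ_NULL is ((mp_obj_t)0), so asking whether an exception is currently
// active in the handler's slot is just a non-zero test.
static inline bool exc_slot_active(mp_obj_t exc_slot) {
    return exc_slot != MP_OBJ_NULL;
}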
@@ -2272,7 +2306,7 @@ STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
} else {
adjust_stack(emit, 1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"unary op %q not implemented", mp_unary_op_method_name[op]);
MP_ERROR_TEXT("unary op %q not implemented"), mp_unary_op_method_name[op]);
}
}
@@ -2280,7 +2314,8 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
DEBUG_printf("binary_op(" UINT_FMT ")\n", op);
vtype_kind_t vtype_lhs = peek_vtype(emit, 1);
vtype_kind_t vtype_rhs = peek_vtype(emit, 0);
if (vtype_lhs == VTYPE_INT && vtype_rhs == VTYPE_INT) {
if ((vtype_lhs == VTYPE_INT || vtype_lhs == VTYPE_UINT)
&& (vtype_rhs == VTYPE_INT || vtype_rhs == VTYPE_UINT)) {
// for integers, inplace and normal ops are equivalent, so use just normal ops
if (MP_BINARY_OP_INPLACE_OR <= op && op <= MP_BINARY_OP_INPLACE_POWER) {
op += MP_BINARY_OP_OR - MP_BINARY_OP_INPLACE_OR;
@@ -2297,9 +2332,13 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG(emit->as, REG_RET);
} else {
ASM_ASR_REG(emit->as, REG_RET);
if (vtype_lhs == VTYPE_UINT) {
ASM_LSR_REG(emit->as, REG_RET);
} else {
ASM_ASR_REG(emit->as, REG_RET);
}
}
emit_post_push_reg(emit, VTYPE_INT, REG_RET);
emit_post_push_reg(emit, vtype_lhs, REG_RET);
return;
}
#endif
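The change above is why viper's uint support needs its own shift instruction: a right shift of an unsigned value must be logical (LSR), while a signed value keeps the arithmetic shift (ASR) so the sign bit is preserved. A minimal stand-alone illustration in ordinary C, not emitter code:

#include <stdint.h>

// 0x80000000 >> 1 is 0x40000000 for the unsigned version (logical shift) but
// 0xC0000000 for the signed one on the targets MicroPython supports
// (arithmetic shift keeps the sign bit) -- exactly the LSR/ASR split selected
// on vtype_lhs above.
uint32_t shr_uint(uint32_t x, unsigned n) { return x >> n; }
int32_t shr_int(int32_t x, unsigned n) { return x >> n; }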
@@ -2307,6 +2346,10 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
// special cases for floor-divide and modulo because we dispatch to helper functions
if (op == MP_BINARY_OP_FLOOR_DIVIDE || op == MP_BINARY_OP_MODULO) {
emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
if (vtype_lhs != VTYPE_INT) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
MP_ERROR_TEXT("div/mod not implemented for uint"), mp_binary_op_method_name[op]);
}
if (op == MP_BINARY_OP_FLOOR_DIVIDE) {
emit_call(emit, MP_F_SMALL_INT_FLOOR_DIVIDE);
} else {
@@ -2319,33 +2362,41 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
int reg_rhs = REG_ARG_3;
emit_pre_pop_reg_flexible(emit, &vtype_rhs, &reg_rhs, REG_RET, REG_ARG_2);
emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2);
#if !(N_X64 || N_X86)
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
} else if (op == MP_BINARY_OP_RSHIFT) {
ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
} else
if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
if (op == MP_BINARY_OP_LSHIFT) {
ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
} else {
if (vtype_lhs == VTYPE_UINT) {
ASM_LSR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
} else {
ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
}
}
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
return;
}
#endif
if (op == MP_BINARY_OP_OR) {
ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_XOR) {
ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_AND) {
ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_ADD) {
ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_SUBTRACT) {
ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (op == MP_BINARY_OP_MULTIPLY) {
ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
} else if (MP_BINARY_OP_LESS <= op && op <= MP_BINARY_OP_NOT_EQUAL) {
// comparison ops are (in enum order):
// MP_BINARY_OP_LESS
@@ -2354,11 +2405,26 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
// MP_BINARY_OP_LESS_EQUAL
// MP_BINARY_OP_MORE_EQUAL
// MP_BINARY_OP_NOT_EQUAL
if (vtype_lhs != vtype_rhs) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("comparison of int and uint"));
}
size_t op_idx = op - MP_BINARY_OP_LESS + (vtype_lhs == VTYPE_UINT ? 0 : 6);
need_reg_single(emit, REG_RET, 0);
#if N_X64
asm_x64_xor_r64_r64(emit->as, REG_RET, REG_RET);
asm_x64_cmp_r64_with_r64(emit->as, reg_rhs, REG_ARG_2);
static byte ops[6] = {
static byte ops[6 + 6] = {
// unsigned
ASM_X64_CC_JB,
ASM_X64_CC_JA,
ASM_X64_CC_JE,
ASM_X64_CC_JBE,
ASM_X64_CC_JAE,
ASM_X64_CC_JNE,
// signed
ASM_X64_CC_JL,
ASM_X64_CC_JG,
ASM_X64_CC_JE,
@@ -2366,11 +2432,19 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_X64_CC_JGE,
ASM_X64_CC_JNE,
};
asm_x64_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
asm_x64_setcc_r8(emit->as, ops[op_idx], REG_RET);
#elif N_X86
asm_x86_xor_r32_r32(emit->as, REG_RET, REG_RET);
asm_x86_cmp_r32_with_r32(emit->as, reg_rhs, REG_ARG_2);
static byte ops[6] = {
static byte ops[6 + 6] = {
// unsigned
ASM_X86_CC_JB,
ASM_X86_CC_JA,
ASM_X86_CC_JE,
ASM_X86_CC_JBE,
ASM_X86_CC_JAE,
ASM_X86_CC_JNE,
// signed
ASM_X86_CC_JL,
ASM_X86_CC_JG,
ASM_X86_CC_JE,
@@ -2378,24 +2452,62 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_X86_CC_JGE,
ASM_X86_CC_JNE,
};
asm_x86_setcc_r8(emit->as, ops[op - MP_BINARY_OP_LESS], REG_RET);
asm_x86_setcc_r8(emit->as, ops[op_idx], REG_RET);
#elif N_THUMB
asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
static uint16_t ops[6] = {
ASM_THUMB_OP_ITE_GE,
#if MICROPY_EMIT_THUMB_ARMV7M
static uint16_t ops[6 + 6] = {
// unsigned
ASM_THUMB_OP_ITE_CC,
ASM_THUMB_OP_ITE_HI,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_LS,
ASM_THUMB_OP_ITE_CS,
ASM_THUMB_OP_ITE_NE,
// signed
ASM_THUMB_OP_ITE_LT,
ASM_THUMB_OP_ITE_GT,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_GT,
ASM_THUMB_OP_ITE_LE,
ASM_THUMB_OP_ITE_GE,
ASM_THUMB_OP_ITE_EQ,
ASM_THUMB_OP_ITE_NE,
};
static byte ret[6] = { 0, 1, 1, 0, 1, 0, };
asm_thumb_op16(emit->as, ops[op - MP_BINARY_OP_LESS]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, ret[op - MP_BINARY_OP_LESS] ^ 1);
asm_thumb_op16(emit->as, ops[op_idx]);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
#else
static uint16_t ops[6 + 6] = {
// unsigned
ASM_THUMB_CC_CC,
ASM_THUMB_CC_HI,
ASM_THUMB_CC_EQ,
ASM_THUMB_CC_LS,
ASM_THUMB_CC_CS,
ASM_THUMB_CC_NE,
// signed
ASM_THUMB_CC_LT,
ASM_THUMB_CC_GT,
ASM_THUMB_CC_EQ,
ASM_THUMB_CC_LE,
ASM_THUMB_CC_GE,
ASM_THUMB_CC_NE,
};
asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
asm_thumb_b_rel12(emit->as, 4);
asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
#endif
#elif N_ARM
asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
static uint ccs[6] = {
static uint ccs[6 + 6] = {
// unsigned
ASM_ARM_CC_CC,
ASM_ARM_CC_HI,
ASM_ARM_CC_EQ,
ASM_ARM_CC_LS,
ASM_ARM_CC_CS,
ASM_ARM_CC_NE,
// signed
ASM_ARM_CC_LT,
ASM_ARM_CC_GT,
ASM_ARM_CC_EQ,
@@ -2403,9 +2515,17 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_ARM_CC_GE,
ASM_ARM_CC_NE,
};
asm_arm_setcc_reg(emit->as, REG_RET, ccs[op - MP_BINARY_OP_LESS]);
asm_arm_setcc_reg(emit->as, REG_RET, ccs[op_idx]);
#elif N_XTENSA || N_XTENSAWIN
static uint8_t ccs[6] = {
static uint8_t ccs[6 + 6] = {
// unsigned
ASM_XTENSA_CC_LTU,
0x80 | ASM_XTENSA_CC_LTU, // for GTU we'll swap args
ASM_XTENSA_CC_EQ,
0x80 | ASM_XTENSA_CC_GEU, // for LEU we'll swap args
ASM_XTENSA_CC_GEU,
ASM_XTENSA_CC_NE,
// signed
ASM_XTENSA_CC_LT,
0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args
ASM_XTENSA_CC_EQ,
@@ -2413,21 +2533,21 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
ASM_XTENSA_CC_GE,
ASM_XTENSA_CC_NE,
};
uint8_t cc = ccs[op - MP_BINARY_OP_LESS];
uint8_t cc = ccs[op_idx];
if ((cc & 0x80) == 0) {
asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs);
} else {
asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2);
}
#else
#error not implemented
#error not implemented
#endif
emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
} else {
// TODO other ops not yet implemented
adjust_stack(emit, 1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"binary op %q not implemented", mp_binary_op_method_name[op]);
MP_ERROR_TEXT("binary op %q not implemented"), mp_binary_op_method_name[op]);
}
} else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_3, &vtype_lhs, REG_ARG_2);
@@ -2448,7 +2568,7 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
} else {
adjust_stack(emit, -1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"can't do binary op between '%q' and '%q'",
MP_ERROR_TEXT("can't do binary op between '%q' and '%q'"),
vtype_to_qstr(vtype_lhs), vtype_to_qstr(vtype_rhs));
}
}
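The doubled condition-code tables above exist because the same two machine words compare differently depending on signedness, so uint operands must select the unsigned codes (JB/JA on x86, CC/HI on Thumb/ARM, LTU/GEU on Xtensa) while int keeps the signed ones. A small hedged example of the difference in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t u = 0x80000000u;   // 2147483648 when compared unsigned
    int32_t s = (int32_t)u;     // -2147483648 when compared signed
    printf("%d\n", u > 1u);     // 1: unsigned compare (e.g. JA / HI)
    printf("%d\n", s > 1);      // 0: signed compare   (e.g. JG / GT)
    return 0;
}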
@@ -2626,7 +2746,7 @@ STATIC void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_u
break;
default:
// this can happen when casting a cast: int(int)
mp_raise_NotImplementedError("casting");
mp_raise_NotImplementedError(MP_ERROR_TEXT("casting"));
}
} else {
assert(vtype_fun == VTYPE_PYOBJ);
@@ -2690,7 +2810,7 @@ STATIC void emit_native_return_value(emit_t *emit) {
emit_pre_pop_reg(emit, &vtype, return_vtype == VTYPE_PYOBJ ? REG_PARENT_RET : REG_ARG_1);
if (vtype != return_vtype) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
"return expected '%q' but got '%q'",
MP_ERROR_TEXT("return expected '%q' but got '%q'"),
vtype_to_qstr(return_vtype), vtype_to_qstr(vtype));
}
}
@@ -2719,7 +2839,7 @@ STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) {
vtype_kind_t vtype_exc;
emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
if (vtype_exc != VTYPE_PYOBJ) {
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, "must raise an object");
EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("must raise an object"));
}
// TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
emit_call(emit, MP_F_NATIVE_RAISE);
@@ -2729,7 +2849,7 @@ STATIC void emit_native_yield(emit_t *emit, int kind) {
// Note: 1 (yield) or 3 (yield from) labels are reserved for this function, starting at *emit->label_slot
if (emit->do_viper_types) {
mp_raise_NotImplementedError("native yield");
mp_raise_NotImplementedError(MP_ERROR_TEXT("native yield"));
}
emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;
@@ -2775,6 +2895,7 @@ STATIC void emit_native_yield(emit_t *emit, int kind) {
// Found active handler, get its PC
ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
break;
}
}
}

View File

@@ -68,11 +68,20 @@ union floatbits {
float f;
uint32_t u;
};
static inline int fp_signbit(float x) { union floatbits fb = {x}; return fb.u & FLT_SIGN_MASK; }
static inline int fp_signbit(float x) {
union floatbits fb = {x};
return fb.u & FLT_SIGN_MASK;
}
#define fp_isnan(x) isnan(x)
#define fp_isinf(x) isinf(x)
static inline int fp_iszero(float x) { union floatbits fb = {x}; return fb.u == 0; }
static inline int fp_isless1(float x) { union floatbits fb = {x}; return fb.u < 0x3f800000; }
static inline int fp_iszero(float x) {
union floatbits fb = {x};
return fb.u == 0;
}
static inline int fp_isless1(float x) {
union floatbits fb = {x};
return fb.u < 0x3f800000;
}
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
@@ -91,15 +100,15 @@ static inline int fp_isless1(float x) { union floatbits fb = {x}; return fb.u <
static const FPTYPE g_pos_pow[] = {
#if FPDECEXP > 32
1e256, 1e128, 1e64,
MICROPY_FLOAT_CONST(1e256), MICROPY_FLOAT_CONST(1e128), MICROPY_FLOAT_CONST(1e64),
#endif
1e32, 1e16, 1e8, 1e4, 1e2, 1e1
MICROPY_FLOAT_CONST(1e32), MICROPY_FLOAT_CONST(1e16), MICROPY_FLOAT_CONST(1e8), MICROPY_FLOAT_CONST(1e4), MICROPY_FLOAT_CONST(1e2), MICROPY_FLOAT_CONST(1e1)
};
static const FPTYPE g_neg_pow[] = {
#if FPDECEXP > 32
1e-256, 1e-128, 1e-64,
MICROPY_FLOAT_CONST(1e-256), MICROPY_FLOAT_CONST(1e-128), MICROPY_FLOAT_CONST(1e-64),
#endif
1e-32, 1e-16, 1e-8, 1e-4, 1e-2, 1e-1
MICROPY_FLOAT_CONST(1e-32), MICROPY_FLOAT_CONST(1e-16), MICROPY_FLOAT_CONST(1e-8), MICROPY_FLOAT_CONST(1e-4), MICROPY_FLOAT_CONST(1e-2), MICROPY_FLOAT_CONST(1e-1)
};
int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, char sign) {
@@ -282,7 +291,7 @@ int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, ch
if (fmt == 'e' && prec > (buf_remaining - FPMIN_BUF_SIZE)) {
prec = buf_remaining - FPMIN_BUF_SIZE;
}
if (fmt == 'g'){
if (fmt == 'g') {
// Truncate precision to prevent buffer overflow
if (prec + (FPMIN_BUF_SIZE - 1) > buf_remaining) {
prec = buf_remaining - (FPMIN_BUF_SIZE - 1);
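Wrapping the power-of-ten tables in MICROPY_FLOAT_CONST keeps the literals single precision when mp_float_t is a 32-bit float, so no double-precision constants or arithmetic are pulled into the build. For reference, the macro is assumed to follow the standard MicroPython definition (a sketch, not this repository's exact mpconfig.h):

#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
#define MICROPY_FLOAT_CONST(x) x##F   // 1e32 becomes 1e32F and stays a float
#else
#define MICROPY_FLOAT_CONST(x) x      // double builds keep the plain literal
#endif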

View File

@@ -146,7 +146,7 @@ int mp_find_frozen_module(const char *str, size_t len, void **data) {
#if MICROPY_MODULE_FROZEN_MPY
const mp_raw_code_t *rc = mp_find_frozen_mpy(str, len);
if (rc != NULL) {
*data = (void*)rc;
*data = (void *)rc;
return MP_FROZEN_MPY;
}
#endif

View File

@@ -49,7 +49,7 @@
// detect untraced object still in use
#define CLEAR_ON_SWEEP (0)
#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / BYTES_PER_WORD)
#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)
// ATB = allocation table byte
@@ -82,7 +82,7 @@
#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
#define BLOCK_FROM_PTR(ptr) (((byte*)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
#define BLOCK_FROM_PTR(ptr) (((byte *)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (uintptr_t)MP_STATE_MEM(gc_pool_start)))
#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
@@ -108,49 +108,49 @@
// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
void gc_init(void *start, void *end) {
// align end pointer on block boundary
end = (void*)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte*)end - (byte*)start);
end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
// calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
// T = A + F + P
// F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
// P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
// => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
size_t total_byte_len = (byte*)end - (byte*)start;
#if MICROPY_ENABLE_FINALISER
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * BITS_PER_BYTE / (BITS_PER_BYTE + BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
#endif
size_t total_byte_len = (byte *)end - (byte *)start;
#if MICROPY_ENABLE_FINALISER
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len * MP_BITS_PER_BYTE / (MP_BITS_PER_BYTE + MP_BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
#else
MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + MP_BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
#endif
MP_STATE_MEM(gc_alloc_table_start) = (byte*)start;
MP_STATE_MEM(gc_alloc_table_start) = (byte *)start;
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len);
#endif
#endif
size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
MP_STATE_MEM(gc_pool_start) = (byte*)end - gc_pool_block_len * BYTES_PER_BLOCK;
MP_STATE_MEM(gc_pool_start) = (byte *)end - gc_pool_block_len * BYTES_PER_BLOCK;
MP_STATE_MEM(gc_pool_end) = end;
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
#endif
#endif
// clear ATBs
memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len));
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
// clear FTBs
memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
#endif
#endif
// set last free ATB index to start of heap
MP_STATE_MEM(gc_last_free_atb_index) = 0;
// unlock the GC
MP_STATE_MEM(gc_lock_depth) = 0;
MP_STATE_THREAD(gc_lock_depth) = 0;
// allow auto collection
MP_STATE_MEM(gc_auto_collect_enabled) = 1;
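As a sanity check of the sizing comments above, here is a hedged Python sketch of the same arithmetic, using what appear to be the usual 32-bit defaults (16-byte blocks, 4 blocks per ATB byte, 8 blocks per FTB byte); other ports may use different constants:

    # Sketch of the gc_init() sizing: T = A + F + P, where
    #   F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
    #   P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
    MP_BITS_PER_BYTE = 8
    BYTES_PER_BLOCK = 16   # assumed MICROPY_BYTES_PER_GC_BLOCK on a 32-bit port
    BLOCKS_PER_ATB = 4     # 2 allocation bits per block
    BLOCKS_PER_FTB = 8     # 1 finaliser bit per block

    def alloc_table_byte_len(total_byte_len, finaliser=True):
        if finaliser:
            return total_byte_len * MP_BITS_PER_BYTE // (
                MP_BITS_PER_BYTE
                + MP_BITS_PER_BYTE * BLOCKS_PER_ATB // BLOCKS_PER_FTB
                + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK)
        return total_byte_len // (1 + MP_BITS_PER_BYTE // 2 * BYTES_PER_BLOCK)

    heap = 32 * 1024                            # e.g. a 32 KiB heap
    A = alloc_table_byte_len(heap)              # -> 500 bytes of allocation table
    F = A * BLOCKS_PER_ATB // BLOCKS_PER_FTB    # -> 250 bytes of finaliser table
    P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK    # -> 32000 bytes of pool
    print(A, F, P, A + F + P <= heap)           # 500 250 32000 True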
@@ -161,39 +161,40 @@ void gc_init(void *start, void *end) {
MP_STATE_MEM(gc_alloc_amount) = 0;
#endif
#if MICROPY_PY_THREAD
#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
#endif
DEBUG_printf("GC layout:\n");
DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
#endif
#endif
DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
}
void gc_lock(void) {
GC_ENTER();
MP_STATE_MEM(gc_lock_depth)++;
GC_EXIT();
// This does not need to be atomic or have the GC mutex because:
// - each thread has its own gc_lock_depth so there are no races between threads;
// - a hard interrupt will only change gc_lock_depth during its execution, and
// upon return will restore the value of gc_lock_depth.
MP_STATE_THREAD(gc_lock_depth)++;
}
void gc_unlock(void) {
GC_ENTER();
MP_STATE_MEM(gc_lock_depth)--;
GC_EXIT();
// This does not need to be atomic, See comment above in gc_lock.
MP_STATE_THREAD(gc_lock_depth)--;
}
bool gc_is_locked(void) {
return MP_STATE_MEM(gc_lock_depth) != 0;
return MP_STATE_THREAD(gc_lock_depth) != 0;
}
// ptr should be of type void*
#define VERIFY_PTR(ptr) ( \
((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
&& ptr >= (void*)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
&& ptr < (void*)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
&& ptr >= (void *)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
&& ptr < (void *)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
)
#ifndef TRACE_MARK
@@ -219,8 +220,8 @@ STATIC void gc_mark_subtree(size_t block) {
} while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
// check this block's children
void **ptrs = (void**)PTR_FROM_BLOCK(block);
for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void*); i > 0; i--, ptrs++) {
void **ptrs = (void **)PTR_FROM_BLOCK(block);
for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void *); i > 0; i--, ptrs++) {
void *ptr = *ptrs;
if (VERIFY_PTR(ptr)) {
// Mark and push this pointer
@@ -271,9 +272,9 @@ STATIC void gc_sweep(void) {
for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
switch (ATB_GET_KIND(block)) {
case AT_HEAD:
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
if (FTB_GET(block)) {
mp_obj_base_t *obj = (mp_obj_base_t*)PTR_FROM_BLOCK(block);
mp_obj_base_t *obj = (mp_obj_base_t *)PTR_FROM_BLOCK(block);
if (obj->type != NULL) {
// if the object has a type then see if it has a __del__ method
mp_obj_t dest[2];
@@ -292,19 +293,20 @@ STATIC void gc_sweep(void) {
// clear finaliser flag
FTB_CLEAR(block);
}
#endif
#endif
free_tail = 1;
DEBUG_printf("gc_sweep(%p)\n", PTR_FROM_BLOCK(block));
DEBUG_printf("gc_sweep(%p)\n", (void *)PTR_FROM_BLOCK(block));
#if MICROPY_PY_GC_COLLECT_RETVAL
MP_STATE_MEM(gc_collected)++;
#endif
// fall through to free the head
MP_FALLTHROUGH
case AT_TAIL:
if (free_tail) {
ATB_ANY_TO_FREE(block);
#if CLEAR_ON_SWEEP
memset((void*)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
memset((void *)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
#endif
}
break;
@@ -319,7 +321,7 @@ STATIC void gc_sweep(void) {
void gc_collect_start(void) {
GC_ENTER();
MP_STATE_MEM(gc_lock_depth)++;
MP_STATE_THREAD(gc_lock_depth)++;
#if MICROPY_GC_ALLOC_THRESHOLD
MP_STATE_MEM(gc_alloc_amount) = 0;
#endif
@@ -328,21 +330,31 @@ void gc_collect_start(void) {
// Trace root pointers. This relies on the root pointers being organised
// correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
// dict_globals, then the root pointer section of mp_state_vm.
void **ptrs = (void**)(void*)&mp_state_ctx;
void **ptrs = (void **)(void *)&mp_state_ctx;
size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals);
size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk);
gc_collect_root(ptrs + root_start / sizeof(void*), (root_end - root_start) / sizeof(void*));
gc_collect_root(ptrs + root_start / sizeof(void *), (root_end - root_start) / sizeof(void *));
#if MICROPY_ENABLE_PYSTACK
// Trace root pointers from the Python stack.
ptrs = (void**)(void*)MP_STATE_THREAD(pystack_start);
gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void*));
ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
#endif
}
// Address sanitizer needs to know that the access to ptrs[i] must always be
// considered OK, even if it's a load from an address that would normally be
// prohibited (due to being undefined, in a red zone, etc).
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
__attribute__((no_sanitize_address))
#endif
static void *gc_get_ptr(void **ptrs, int i) {
return ptrs[i];
}
void gc_collect_root(void **ptrs, size_t len) {
for (size_t i = 0; i < len; i++) {
void *ptr = ptrs[i];
void *ptr = gc_get_ptr(ptrs, i);
if (VERIFY_PTR(ptr)) {
size_t block = BLOCK_FROM_PTR(ptr);
if (ATB_GET_KIND(block) == AT_HEAD) {
@@ -359,13 +371,13 @@ void gc_collect_end(void) {
gc_deal_with_stack_overflow();
gc_sweep();
MP_STATE_MEM(gc_last_free_atb_index) = 0;
MP_STATE_MEM(gc_lock_depth)--;
MP_STATE_THREAD(gc_lock_depth)--;
GC_EXIT();
}
void gc_sweep_all(void) {
GC_ENTER();
MP_STATE_MEM(gc_lock_depth)++;
MP_STATE_THREAD(gc_lock_depth)++;
MP_STATE_MEM(gc_stack_overflow) = 0;
gc_collect_end();
}
@@ -444,14 +456,13 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
return NULL;
}
GC_ENTER();
// check if GC is locked
if (MP_STATE_MEM(gc_lock_depth) > 0) {
GC_EXIT();
if (MP_STATE_THREAD(gc_lock_depth) > 0) {
return NULL;
}
GC_ENTER();
size_t i;
size_t end_block;
size_t start_block;
@@ -473,10 +484,12 @@ void *gc_alloc(size_t n_bytes, unsigned int alloc_flags) {
n_free = 0;
for (i = MP_STATE_MEM(gc_last_free_atb_index); i < MP_STATE_MEM(gc_alloc_table_byte_len); i++) {
byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
// *FORMAT-OFF*
if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
// *FORMAT-ON*
}
GC_EXIT();
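The *FORMAT-OFF* loop above scans the allocation table two bits per block, starting at gc_last_free_atb_index, until it has seen n_blocks consecutive free blocks. A minimal Python model of that search, assuming the usual 2-bit encoding with 0 meaning AT_FREE (an illustration, not MicroPython's code):

    AT_FREE, AT_HEAD, AT_TAIL, AT_MARK = 0, 1, 2, 3   # assumed 2-bit block states

    def find_free_run(atb, n_blocks, start_atb_index=0):
        # Return the first block of a run of n_blocks free blocks, or None.
        n_free = 0
        for i in range(start_atb_index, len(atb)):
            for j in range(4):                         # 4 blocks per ATB byte
                kind = (atb[i] >> (2 * j)) & 0x3
                if kind == AT_FREE:
                    n_free += 1
                    if n_free >= n_blocks:
                        return i * 4 + j - (n_blocks - 1)
                else:
                    n_free = 0
        return None

    # Blocks 0..1 hold a 2-block object (HEAD, TAIL); blocks 2..7 are free.
    atb = bytearray([AT_HEAD | (AT_TAIL << 2), 0x00])
    print(find_free_run(atb, 3))                       # -> 2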
@@ -516,7 +529,7 @@ found:
// get pointer to first block
// we must create this pointer before unlocking the GC so a collection can find it
void *ret_ptr = (void*)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
void *ret_ptr = (void *)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
#if MICROPY_GC_ALLOC_THRESHOLD
@@ -527,20 +540,20 @@ found:
#if MICROPY_GC_CONSERVATIVE_CLEAR
// be conservative and zero out all the newly allocated blocks
memset((byte*)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
memset((byte *)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
#else
// zero out the additional bytes of the newly allocated blocks
// This is needed because the blocks may have previously held pointers
// to the heap and will not be set to something else if the caller
// doesn't actually use the entire block. As such they will continue
// to point to the heap and may prevent other blocks from being reclaimed.
memset((byte*)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
memset((byte *)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
#endif
#if MICROPY_ENABLE_FINALISER
if (has_finaliser) {
// clear type pointer in case it is never set
((mp_obj_base_t*)ret_ptr)->type = NULL;
((mp_obj_base_t *)ret_ptr)->type = NULL;
// set mp_obj flag only if it has a finaliser
GC_ENTER();
FTB_SET(start_block);
@@ -570,13 +583,13 @@ void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
// force the freeing of a piece of memory
// TODO: freeing here does not call finaliser
void gc_free(void *ptr) {
GC_ENTER();
if (MP_STATE_MEM(gc_lock_depth) > 0) {
if (MP_STATE_THREAD(gc_lock_depth) > 0) {
// TODO how to deal with this error?
GC_EXIT();
return;
}
GC_ENTER();
DEBUG_printf("gc_free(%p)\n", ptr);
if (ptr == NULL) {
@@ -641,11 +654,11 @@ void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
if (ptr == NULL) {
has_finaliser = false;
} else {
#if MICROPY_ENABLE_FINALISER
#if MICROPY_ENABLE_FINALISER
has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
#else
#else
has_finaliser = false;
#endif
#endif
}
void *ptr2 = gc_alloc(n_bytes, has_finaliser);
if (ptr2 == NULL) {
@@ -671,15 +684,14 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
return NULL;
}
if (MP_STATE_THREAD(gc_lock_depth) > 0) {
return NULL;
}
void *ptr = ptr_in;
GC_ENTER();
if (MP_STATE_MEM(gc_lock_depth) > 0) {
GC_EXIT();
return NULL;
}
// get the GC block number corresponding to this pointer
assert(VERIFY_PTR(ptr));
size_t block = BLOCK_FROM_PTR(ptr);
@@ -694,7 +706,7 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
// free blocks to satisfy the realloc. Note that we need to compute the
// total size of the existing memory chunk so we can correctly and
// efficiently shrink it (see below for shrinking code).
size_t n_free = 0;
size_t n_free = 0;
size_t n_blocks = 1; // counting HEAD block
size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
for (size_t bl = block + n_blocks; bl < max_block; bl++) {
@@ -753,10 +765,10 @@ void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
#if MICROPY_GC_CONSERVATIVE_CLEAR
// be conservative and zero out all the newly allocated blocks
memset((byte*)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
memset((byte *)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
#else
// zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
memset((byte*)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
memset((byte *)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
#endif
#if EXTENSIVE_HEAP_PROFILING
@@ -800,7 +812,7 @@ void gc_dump_info(void) {
mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n",
(uint)info.total, (uint)info.used, (uint)info.free);
mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n",
(uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
(uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
}
void gc_dump_alloc_table(void) {
@@ -832,12 +844,14 @@ void gc_dump_alloc_table(void) {
}
// print header for new line of blocks
// (the cast to uint32_t is for 16-bit ports)
//mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff));
// mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff));
mp_printf(&mp_plat_print, "\n%05x: ", (uint)((bl * BYTES_PER_BLOCK) & (uint32_t)0xfffff));
}
int c = ' ';
switch (ATB_GET_KIND(bl)) {
case AT_FREE: c = '.'; break;
case AT_FREE:
c = '.';
break;
/* this prints out if the object is reachable from BSS or STACK (for unix only)
case AT_HEAD: {
c = 'h';
@@ -866,35 +880,48 @@ void gc_dump_alloc_table(void) {
*/
/* this prints the uPy object type of the head block */
case AT_HEAD: {
void **ptr = (void**)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK);
if (*ptr == &mp_type_tuple) { c = 'T'; }
else if (*ptr == &mp_type_list) { c = 'L'; }
else if (*ptr == &mp_type_dict) { c = 'D'; }
else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) { c = 'S'; }
void **ptr = (void **)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK);
if (*ptr == &mp_type_tuple) {
c = 'T';
} else if (*ptr == &mp_type_list) {
c = 'L';
} else if (*ptr == &mp_type_dict) {
c = 'D';
} else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) {
c = 'S';
}
#if MICROPY_PY_BUILTINS_BYTEARRAY
else if (*ptr == &mp_type_bytearray) { c = 'A'; }
else if (*ptr == &mp_type_bytearray) {
c = 'A';
}
#endif
#if MICROPY_PY_ARRAY
else if (*ptr == &mp_type_array) { c = 'A'; }
else if (*ptr == &mp_type_array) {
c = 'A';
}
#endif
#if MICROPY_PY_BUILTINS_FLOAT
else if (*ptr == &mp_type_float) { c = 'F'; }
else if (*ptr == &mp_type_float) {
c = 'F';
}
#endif
else if (*ptr == &mp_type_fun_bc) { c = 'B'; }
else if (*ptr == &mp_type_module) { c = 'M'; }
else {
else if (*ptr == &mp_type_fun_bc) {
c = 'B';
} else if (*ptr == &mp_type_module) {
c = 'M';
} else {
c = 'h';
#if 0
// This code prints "Q" for qstr-pool data, and "q" for qstr-str
// data. It can be useful to see how qstrs are being allocated,
// but is disabled by default because it is very slow.
for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
if ((qstr_pool_t*)ptr == pool) {
if ((qstr_pool_t *)ptr == pool) {
c = 'Q';
break;
}
for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
if ((const byte*)ptr == *q) {
if ((const byte *)ptr == *q) {
c = 'q';
break;
}
@@ -904,8 +931,12 @@ void gc_dump_alloc_table(void) {
}
break;
}
case AT_TAIL: c = '='; break;
case AT_MARK: c = 'm'; break;
case AT_TAIL:
c = '=';
break;
case AT_MARK:
c = 'm';
break;
}
mp_printf(&mp_plat_print, "%c", c);
}
@@ -931,11 +962,11 @@ void gc_test(void) {
p2[1] = p;
ptrs[0] = p2;
}
for (int i = 0; i < 25; i+=2) {
for (int i = 0; i < 25; i += 2) {
mp_uint_t *p = gc_alloc(i, false);
printf("p=%p\n", p);
if (i & 3) {
//ptrs[i] = p;
// ptrs[i] = p;
}
}
@@ -943,7 +974,7 @@ void gc_test(void) {
gc_dump_alloc_table();
printf("Starting GC...\n");
gc_collect_start();
gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void*));
gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void *));
gc_collect_end();
printf("After GC:\n");
gc_dump_alloc_table();

View File

@@ -26,10 +26,8 @@
#ifndef MICROPY_INCLUDED_PY_GC_H
#define MICROPY_INCLUDED_PY_GC_H
#include <stdint.h>
#include "py/mpconfig.h"
#include "py/misc.h"
#include <stdbool.h>
#include <stddef.h>
void gc_init(void *start, void *end);

View File

@@ -3,7 +3,7 @@
*
* The MIT License (MIT)
*
* Copyright (c) 2013-2015 Damien P. George
* Copyright (c) 2013-2020 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -24,10 +24,17 @@
* THE SOFTWARE.
*/
// *FORMAT-OFF*
// rules for writing rules:
// - zero_or_more is implemented using opt_rule around a one_or_more rule
// - don't put opt_rule in arguments of or rule; instead, wrap the call to this or rule in opt_rule
// Generic sub-rules used by multiple rules below.
DEF_RULE_NC(generic_colon_test, and_ident(2), tok(DEL_COLON), rule(test))
DEF_RULE_NC(generic_equal_test, and_ident(2), tok(DEL_EQUAL), rule(test))
// # Start symbols for the grammar:
// # single_input is a single interactive statement;
// # file_input is a module or sequence of commands read from an input file;
@@ -69,19 +76,16 @@ DEF_RULE_NC(funcdefrettype, and_ident(2), tok(DEL_MINUS_MORE), rule(test))
// note: typedargslist lets through more than is allowed, compiler does further checks
DEF_RULE_NC(typedargslist, list_with_end, rule(typedargslist_item), tok(DEL_COMMA))
DEF_RULE_NC(typedargslist_item, or(3), rule(typedargslist_name), rule(typedargslist_star), rule(typedargslist_dbl_star))
DEF_RULE_NC(typedargslist_name, and_ident(3), tok(NAME), opt_rule(typedargslist_colon), opt_rule(typedargslist_equal))
DEF_RULE_NC(typedargslist_name, and_ident(3), tok(NAME), opt_rule(generic_colon_test), opt_rule(generic_equal_test))
DEF_RULE_NC(typedargslist_star, and(2), tok(OP_STAR), opt_rule(tfpdef))
DEF_RULE_NC(typedargslist_dbl_star, and(3), tok(OP_DBL_STAR), tok(NAME), opt_rule(typedargslist_colon))
DEF_RULE_NC(typedargslist_colon, and_ident(2), tok(DEL_COLON), rule(test))
DEF_RULE_NC(typedargslist_equal, and_ident(2), tok(DEL_EQUAL), rule(test))
DEF_RULE_NC(tfpdef, and(2), tok(NAME), opt_rule(typedargslist_colon))
DEF_RULE_NC(typedargslist_dbl_star, and(3), tok(OP_DBL_STAR), tok(NAME), opt_rule(generic_colon_test))
DEF_RULE_NC(tfpdef, and(2), tok(NAME), opt_rule(generic_colon_test))
// note: varargslist lets through more than is allowed, compiler does further checks
DEF_RULE_NC(varargslist, list_with_end, rule(varargslist_item), tok(DEL_COMMA))
DEF_RULE_NC(varargslist_item, or(3), rule(varargslist_name), rule(varargslist_star), rule(varargslist_dbl_star))
DEF_RULE_NC(varargslist_name, and_ident(2), tok(NAME), opt_rule(varargslist_equal))
DEF_RULE_NC(varargslist_name, and_ident(2), tok(NAME), opt_rule(generic_equal_test))
DEF_RULE_NC(varargslist_star, and(2), tok(OP_STAR), opt_rule(vfpdef))
DEF_RULE_NC(varargslist_dbl_star, and(2), tok(OP_DBL_STAR), tok(NAME))
DEF_RULE_NC(varargslist_equal, and_ident(2), tok(DEL_EQUAL), rule(test))
DEF_RULE_NC(vfpdef, and_ident(1), tok(NAME))
// stmt: compound_stmt | simple_stmt
@@ -94,20 +98,22 @@ DEF_RULE_NC(simple_stmt, and_ident(2), rule(simple_stmt_2), tok(NEWLINE))
DEF_RULE(simple_stmt_2, c(generic_all_nodes), list_with_end, rule(small_stmt), tok(DEL_SEMICOLON))
// small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
// expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
// expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
// testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
// annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
// augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//='
// # For normal assignments, additional restrictions enforced by the interpreter
// # For normal and annotated assignments, additional restrictions enforced by the interpreter
DEF_RULE_NC(small_stmt, or(8), rule(del_stmt), rule(pass_stmt), rule(flow_stmt), rule(import_stmt), rule(global_stmt), rule(nonlocal_stmt), rule(assert_stmt), rule(expr_stmt))
DEF_RULE(expr_stmt, c(expr_stmt), and(2), rule(testlist_star_expr), opt_rule(expr_stmt_2))
DEF_RULE_NC(expr_stmt_2, or(2), rule(expr_stmt_augassign), rule(expr_stmt_assign_list))
DEF_RULE_NC(expr_stmt_2, or(3), rule(annassign), rule(expr_stmt_augassign), rule(expr_stmt_assign_list))
DEF_RULE_NC(expr_stmt_augassign, and_ident(2), rule(augassign), rule(expr_stmt_6))
DEF_RULE_NC(expr_stmt_assign_list, one_or_more, rule(expr_stmt_assign))
DEF_RULE_NC(expr_stmt_assign, and_ident(2), tok(DEL_EQUAL), rule(expr_stmt_6))
DEF_RULE_NC(expr_stmt_6, or(2), rule(yield_expr), rule(testlist_star_expr))
DEF_RULE(testlist_star_expr, c(generic_tuple), list_with_end, rule(testlist_star_expr_2), tok(DEL_COMMA))
DEF_RULE_NC(testlist_star_expr_2, or(2), rule(star_expr), rule(test))
DEF_RULE_NC(annassign, and(3), tok(DEL_COLON), rule(test), opt_rule(expr_stmt_assign))
DEF_RULE_NC(augassign, or(13), tok(DEL_PLUS_EQUAL), tok(DEL_MINUS_EQUAL), tok(DEL_STAR_EQUAL), tok(DEL_AT_EQUAL), tok(DEL_SLASH_EQUAL), tok(DEL_PERCENT_EQUAL), tok(DEL_AMPERSAND_EQUAL), tok(DEL_PIPE_EQUAL), tok(DEL_CARET_EQUAL), tok(DEL_DBL_LESS_EQUAL), tok(DEL_DBL_MORE_EQUAL), tok(DEL_DBL_STAR_EQUAL), tok(DEL_DBL_SLASH_EQUAL))
// del_stmt: 'del' exprlist
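The annassign rule introduced above adds PEP 526 style annotations to expr_stmt; a few illustrative Python forms the new grammar is meant to accept (a hedged sketch, not taken from the test suite):

    # annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
    x: int                    # bare annotation, no assignment
    y: int = 5                # annotated assignment
    names: "list" = []        # the annotation is an arbitrary test expression
    y += 1                    # augmented assignment still goes through augassign
    print(y, names)           # -> 6 []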
@@ -182,10 +188,10 @@ DEF_RULE_NC(async_stmt_2, or(3), rule(funcdef), rule(with_stmt), rule(for_stmt))
#else
DEF_RULE_NC(compound_stmt, or(8), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated))
#endif
DEF_RULE(if_stmt, c(if_stmt), and(6), tok(KW_IF), rule(test), tok(DEL_COLON), rule(suite), opt_rule(if_stmt_elif_list), opt_rule(else_stmt))
DEF_RULE(if_stmt, c(if_stmt), and(6), tok(KW_IF), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(if_stmt_elif_list), opt_rule(else_stmt))
DEF_RULE_NC(if_stmt_elif_list, one_or_more, rule(if_stmt_elif))
DEF_RULE_NC(if_stmt_elif, and(4), tok(KW_ELIF), rule(test), tok(DEL_COLON), rule(suite))
DEF_RULE(while_stmt, c(while_stmt), and(5), tok(KW_WHILE), rule(test), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
DEF_RULE_NC(if_stmt_elif, and(4), tok(KW_ELIF), rule(namedexpr_test), tok(DEL_COLON), rule(suite))
DEF_RULE(while_stmt, c(while_stmt), and(5), tok(KW_WHILE), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
DEF_RULE(for_stmt, c(for_stmt), and(7), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(testlist), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
DEF_RULE(try_stmt, c(try_stmt), and(4), tok(KW_TRY), tok(DEL_COLON), rule(suite), rule(try_stmt_2))
DEF_RULE_NC(try_stmt_2, or(2), rule(try_stmt_except_and_more), rule(try_stmt_finally))
@@ -208,6 +214,12 @@ DEF_RULE(suite_block_stmts, c(generic_all_nodes), one_or_more, rule(stmt))
// lambdef: 'lambda' [varargslist] ':' test
// lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
#if MICROPY_PY_ASSIGN_EXPR
DEF_RULE(namedexpr_test, c(namedexpr), and_ident(2), rule(test), opt_rule(namedexpr_test_2))
DEF_RULE_NC(namedexpr_test_2, and_ident(2), tok(OP_ASSIGN), rule(test))
#else
DEF_RULE_NC(namedexpr_test, or(1), rule(test))
#endif
DEF_RULE_NC(test, or(2), rule(lambdef), rule(test_if_expr))
DEF_RULE(test_if_expr, c(test_if_expr), and_ident(2), rule(or_test), opt_rule(test_if_else))
DEF_RULE_NC(test_if_else, and(4), tok(KW_IF), rule(or_test), tok(KW_ELSE), rule(test))
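When MICROPY_PY_ASSIGN_EXPR is enabled, namedexpr_test (and its use in if_stmt, while_stmt and testlist_comp_2 above) brings in PEP 572 assignment expressions; a short hedged illustration of what the ':=' token enables, in plain Python terms:

    data = [1, 7, 3]
    if (n := len(data)) > 2:                    # if_stmt now takes namedexpr_test
        print("long enough:", n)                # -> long enough: 3
    while (n := n - 1) > 0:                     # while_stmt likewise
        pass
    doubled = [y for x in data if (y := x * 2) > 5]
    print(doubled)                              # -> [14, 6]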
@@ -274,7 +286,7 @@ DEF_RULE_NC(atom_2b, or(2), rule(yield_expr), rule(testlist_comp))
DEF_RULE(atom_bracket, c(atom_bracket), and(3), tok(DEL_BRACKET_OPEN), opt_rule(testlist_comp), tok(DEL_BRACKET_CLOSE))
DEF_RULE(atom_brace, c(atom_brace), and(3), tok(DEL_BRACE_OPEN), opt_rule(dictorsetmaker), tok(DEL_BRACE_CLOSE))
DEF_RULE_NC(testlist_comp, and_ident(2), rule(testlist_comp_2), opt_rule(testlist_comp_3))
DEF_RULE_NC(testlist_comp_2, or(2), rule(star_expr), rule(test))
DEF_RULE_NC(testlist_comp_2, or(2), rule(star_expr), rule(namedexpr_test))
DEF_RULE_NC(testlist_comp_3, or(2), rule(comp_for), rule(testlist_comp_3b))
DEF_RULE_NC(testlist_comp_3b, and_ident(2), tok(DEL_COMMA), opt_rule(testlist_comp_3c))
DEF_RULE_NC(testlist_comp_3c, list_with_end, rule(testlist_comp_2), tok(DEL_COMMA))
@@ -310,8 +322,7 @@ DEF_RULE(testlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
// TODO dictorsetmaker lets through more than is allowed
DEF_RULE_NC(dictorsetmaker, and_ident(2), rule(dictorsetmaker_item), opt_rule(dictorsetmaker_tail))
#if MICROPY_PY_BUILTINS_SET
DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and_ident(2), rule(test), opt_rule(dictorsetmaker_colon))
DEF_RULE_NC(dictorsetmaker_colon, and_ident(2), tok(DEL_COLON), rule(test))
DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and_ident(2), rule(test), opt_rule(generic_colon_test))
#else
DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and(3), rule(test), tok(DEL_COLON), rule(test))
#endif
@@ -340,8 +351,12 @@ DEF_RULE_NC(arglist_dbl_star, and(2), tok(OP_DBL_STAR), rule(test))
// comp_if: 'if' test_nocond [comp_iter]
DEF_RULE_NC(argument, and_ident(2), rule(test), opt_rule(argument_2))
DEF_RULE_NC(argument_2, or(2), rule(comp_for), rule(argument_3))
DEF_RULE_NC(argument_3, and_ident(2), tok(DEL_EQUAL), rule(test))
#if MICROPY_PY_ASSIGN_EXPR
DEF_RULE_NC(argument_2, or(3), rule(comp_for), rule(generic_equal_test), rule(argument_3))
DEF_RULE_NC(argument_3, and(2), tok(OP_ASSIGN), rule(test))
#else
DEF_RULE_NC(argument_2, or(2), rule(comp_for), rule(generic_equal_test))
#endif
DEF_RULE_NC(comp_iter, or(2), rule(comp_for), rule(comp_if))
DEF_RULE_NC(comp_for, and_blank(5), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(or_test), opt_rule(comp_iter))
DEF_RULE_NC(comp_if, and(3), tok(KW_IF), rule(test_nocond), opt_rule(comp_iter))

View File

@@ -62,6 +62,12 @@ STATIC bool is_char_or3(mp_lexer_t *lex, byte c1, byte c2, byte c3) {
return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3;
}
#if MICROPY_PY_FSTRINGS
STATIC bool is_char_or4(mp_lexer_t *lex, byte c1, byte c2, byte c3, byte c4) {
return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3 || lex->chr0 == c4;
}
#endif
STATIC bool is_char_following(mp_lexer_t *lex, byte c) {
return lex->chr1 == c;
}
@@ -105,9 +111,15 @@ STATIC bool is_following_odigit(mp_lexer_t *lex) {
STATIC bool is_string_or_bytes(mp_lexer_t *lex) {
return is_char_or(lex, '\'', '\"')
|| (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
|| ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r'))
&& is_char_following_following_or(lex, '\'', '\"'));
#if MICROPY_PY_FSTRINGS
|| (is_char_or4(lex, 'r', 'u', 'b', 'f') && is_char_following_or(lex, '\'', '\"'))
|| (((is_char_and(lex, 'r', 'f') || is_char_and(lex, 'f', 'r'))
&& is_char_following_following_or(lex, '\'', '\"')))
#else
|| (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
#endif
|| ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r'))
&& is_char_following_following_or(lex, '\'', '\"'));
}
// to easily parse utf-8 identifiers we allow any raw byte with high bit set
@@ -132,9 +144,35 @@ STATIC void next_char(mp_lexer_t *lex) {
++lex->column;
}
// shift the input queue forward
lex->chr0 = lex->chr1;
lex->chr1 = lex->chr2;
lex->chr2 = lex->reader.readbyte(lex->reader.data);
// and add the next byte from either the fstring args or the reader
#if MICROPY_PY_FSTRINGS
if (lex->fstring_args_idx) {
// if there are saved chars, then we're currently injecting fstring args
if (lex->fstring_args_idx < lex->fstring_args.len) {
lex->chr2 = lex->fstring_args.buf[lex->fstring_args_idx++];
} else {
// no more fstring arg bytes
lex->chr2 = '\0';
}
if (lex->chr0 == '\0') {
// consumed all fstring data, restore saved input queue
lex->chr0 = lex->chr0_saved;
lex->chr1 = lex->chr1_saved;
lex->chr2 = lex->chr2_saved;
// stop consuming fstring arg data
vstr_reset(&lex->fstring_args);
lex->fstring_args_idx = 0;
}
} else
#endif
{
lex->chr2 = lex->reader.readbyte(lex->reader.data);
}
if (lex->chr1 == '\r') {
// CR is a new line, converted to LF
@@ -174,7 +212,8 @@ STATIC void indent_pop(mp_lexer_t *lex) {
// this means if the start of two ops are the same then they are equal til the last char
STATIC const char *const tok_enc =
"()[]{},:;~" // singles
"()[]{},;~" // singles
":e=" // : :=
"<e=c<e=" // < <= << <<=
">e=c>e=" // > >= >> >>=
"*e=c*e=" // * *= ** **=
@@ -194,8 +233,9 @@ STATIC const uint8_t tok_enc_kind[] = {
MP_TOKEN_DEL_PAREN_OPEN, MP_TOKEN_DEL_PAREN_CLOSE,
MP_TOKEN_DEL_BRACKET_OPEN, MP_TOKEN_DEL_BRACKET_CLOSE,
MP_TOKEN_DEL_BRACE_OPEN, MP_TOKEN_DEL_BRACE_CLOSE,
MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_COLON, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_OP_TILDE,
MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_OP_TILDE,
MP_TOKEN_DEL_COLON, MP_TOKEN_OP_ASSIGN,
MP_TOKEN_OP_LESS, MP_TOKEN_OP_LESS_EQUAL, MP_TOKEN_OP_DBL_LESS, MP_TOKEN_DEL_DBL_LESS_EQUAL,
MP_TOKEN_OP_MORE, MP_TOKEN_OP_MORE_EQUAL, MP_TOKEN_OP_DBL_MORE, MP_TOKEN_DEL_DBL_MORE_EQUAL,
MP_TOKEN_OP_STAR, MP_TOKEN_DEL_STAR_EQUAL, MP_TOKEN_OP_DBL_STAR, MP_TOKEN_DEL_DBL_STAR_EQUAL,
@@ -270,7 +310,7 @@ STATIC bool get_hex(mp_lexer_t *lex, size_t num_digits, mp_uint_t *result) {
return true;
}
STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw, bool is_fstring) {
// get first quoting character
char quote_char = '\'';
if (is_char(lex, '\"')) {
@@ -291,12 +331,57 @@ STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
}
size_t n_closing = 0;
#if MICROPY_PY_FSTRINGS
if (is_fstring) {
// assume there's going to be interpolation, so prep the injection data
// fstring_args_idx==0 && len(fstring_args)>0 means we're extracting the args.
// only when fstring_args_idx>0 will we consume the arg data
// note: lex->fstring_args will be empty already (it's reset when finished)
vstr_add_str(&lex->fstring_args, ".format(");
}
#endif
while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
if (is_char(lex, quote_char)) {
n_closing += 1;
vstr_add_char(&lex->vstr, CUR_CHAR(lex));
} else {
n_closing = 0;
#if MICROPY_PY_FSTRINGS
while (is_fstring && is_char(lex, '{')) {
next_char(lex);
if (is_char(lex, '{')) {
// "{{" is passed through unchanged to be handled by str.format
vstr_add_byte(&lex->vstr, '{');
next_char(lex);
} else {
// remember the start of this argument (if we need it for f'{a=}').
size_t i = lex->fstring_args.len;
// extract characters inside the { until we reach the
// format specifier or closing }.
// (MicroPython limitation) note: this is completely unaware of
// Python syntax and will not handle any expression containing '}' or ':'.
// e.g. f'{"}"}' or f'{foo({})}'.
while (!is_end(lex) && !is_char_or(lex, ':', '}')) {
// like the default case at the end of this function, stay 8-bit clean
vstr_add_byte(&lex->fstring_args, CUR_CHAR(lex));
next_char(lex);
}
if (lex->fstring_args.buf[lex->fstring_args.len - 1] == '=') {
// if the last character of the arg was '=', then inject "arg=" before the '{'.
// f'{a=}' --> 'a={}'.format(a)
vstr_add_strn(&lex->vstr, lex->fstring_args.buf + i, lex->fstring_args.len - i);
// remove the trailing '='
lex->fstring_args.len--;
}
// comma-separate args
vstr_add_byte(&lex->fstring_args, ',');
}
vstr_add_byte(&lex->vstr, '{');
}
#endif
if (is_char(lex, '\\')) {
next_char(lex);
unichar c = CUR_CHAR(lex);
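Taken together, the f-string handling added above is a purely lexical rewrite into str.format() calls; a hedged illustration of the intended effect (not the lexer's literal token stream):

    #   f"hello {name}"     is seen by the parser as   "hello {}".format(name)
    #   f"{a=} and {b:>4}"  is seen by the parser as   "a={} and {:>4}".format(a, b)
    #   f"{{braces}}"       keeps the doubled braces for str.format to collapse
    name, a, b = "world", 1, 2
    print("hello {}".format(name))              # -> hello world
    print("a={} and {:>4}".format(a, b))        # -> a=1 and    2
    # As the comment in the code notes, the scanner is not syntax-aware, so
    # arguments containing '}' or ':' (e.g. f'{foo({})}') are not handled.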
@@ -307,17 +392,36 @@ STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
switch (c) {
// note: "c" can never be MP_LEXER_EOF because next_char
// always inserts a newline at the end of the input stream
case '\n': c = MP_LEXER_EOF; break; // backslash escape the newline, just ignore it
case '\\': break;
case '\'': break;
case '"': break;
case 'a': c = 0x07; break;
case 'b': c = 0x08; break;
case 't': c = 0x09; break;
case 'n': c = 0x0a; break;
case 'v': c = 0x0b; break;
case 'f': c = 0x0c; break;
case 'r': c = 0x0d; break;
case '\n':
c = MP_LEXER_EOF;
break; // backslash escape the newline, just ignore it
case '\\':
break;
case '\'':
break;
case '"':
break;
case 'a':
c = 0x07;
break;
case 'b':
c = 0x08;
break;
case 't':
c = 0x09;
break;
case 'n':
c = 0x0a;
break;
case 'v':
c = 0x0b;
break;
case 'f':
c = 0x0c;
break;
case 'r':
c = 0x0d;
break;
case 'u':
case 'U':
if (lex->tok_kind == MP_TOKEN_BYTES) {
@@ -326,8 +430,8 @@ STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
break;
}
// Otherwise fall through.
case 'x':
{
MP_FALLTHROUGH
case 'x': {
mp_uint_t num = 0;
if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
// not enough hex chars for escape sequence
@@ -342,7 +446,7 @@ STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw) {
// 3MB of text; even gzip-compressed and with minimal structure, it'll take
// roughly half a meg of storage. This form of Unicode escape may be added
// later on, but it's definitely not a priority right now. -- CJA 20140607
mp_raise_NotImplementedError("unicode name escapes");
mp_raise_NotImplementedError(MP_ERROR_TEXT("unicode name escapes"));
break;
default:
if (c >= '0' && c <= '7') {
@@ -430,6 +534,23 @@ STATIC bool skip_whitespace(mp_lexer_t *lex, bool stop_at_newline) {
}
void mp_lexer_to_next(mp_lexer_t *lex) {
#if MICROPY_PY_FSTRINGS
if (lex->fstring_args.len && lex->fstring_args_idx == 0) {
// moving onto the next token means the literal string is complete.
// switch into injecting the format args.
vstr_add_byte(&lex->fstring_args, ')');
lex->chr0_saved = lex->chr0;
lex->chr1_saved = lex->chr1;
lex->chr2_saved = lex->chr2;
lex->chr0 = lex->fstring_args.buf[0];
lex->chr1 = lex->fstring_args.buf[1];
lex->chr2 = lex->fstring_args.buf[2];
// we've already extracted 3 chars, but setting this non-zero also
// means we'll start consuming the fstring data
lex->fstring_args_idx = 3;
}
#endif
// start new token text
vstr_reset(&lex->vstr);
@@ -485,6 +606,7 @@ void mp_lexer_to_next(mp_lexer_t *lex) {
do {
// parse type codes
bool is_raw = false;
bool is_fstring = false;
mp_token_kind_t kind = MP_TOKEN_STRING;
int n_char = 0;
if (is_char(lex, 'u')) {
@@ -503,7 +625,25 @@ void mp_lexer_to_next(mp_lexer_t *lex) {
kind = MP_TOKEN_BYTES;
n_char = 2;
}
#if MICROPY_PY_FSTRINGS
if (is_char_following(lex, 'f')) {
// raw-f-strings unsupported, immediately return (invalid) token.
lex->tok_kind = MP_TOKEN_FSTRING_RAW;
break;
}
#endif
}
#if MICROPY_PY_FSTRINGS
else if (is_char(lex, 'f')) {
if (is_char_following(lex, 'r')) {
// raw-f-strings unsupported, immediately return (invalid) token.
lex->tok_kind = MP_TOKEN_FSTRING_RAW;
break;
}
n_char = 1;
is_fstring = true;
}
#endif
// Set or check token kind
if (lex->tok_kind == MP_TOKEN_END) {
@@ -522,7 +662,7 @@ void mp_lexer_to_next(mp_lexer_t *lex) {
}
// Parse the literal
parse_string_literal(lex, is_raw);
parse_string_literal(lex, is_raw, is_fstring);
// Skip whitespace so we can check if there's another string following
skip_whitespace(lex, true);
@@ -682,6 +822,9 @@ mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) {
lex->num_indent_level = 1;
lex->indent_level = m_new(uint16_t, lex->alloc_indent_level);
vstr_init(&lex->vstr, 32);
#if MICROPY_PY_FSTRINGS
vstr_init(&lex->fstring_args, 0);
#endif
// store sentinel for first indentation level
lex->indent_level[0] = 0;
@@ -707,7 +850,7 @@ mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) {
mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len) {
mp_reader_t reader;
mp_reader_new_mem(&reader, (const byte*)str, len, free_len);
mp_reader_new_mem(&reader, (const byte *)str, len, free_len);
return mp_lexer_new(src_name, reader);
}
@@ -735,6 +878,9 @@ void mp_lexer_free(mp_lexer_t *lex) {
if (lex) {
lex->reader.close(lex->reader.data);
vstr_clear(&lex->vstr);
#if MICROPY_PY_FSTRINGS
vstr_clear(&lex->fstring_args);
#endif
m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
m_del_obj(mp_lexer_t, lex);
}

View File

@@ -44,6 +44,10 @@ typedef enum _mp_token_kind_t {
MP_TOKEN_INVALID,
MP_TOKEN_DEDENT_MISMATCH,
MP_TOKEN_LONELY_STRING_OPEN,
#if MICROPY_PY_FSTRINGS
MP_TOKEN_MALFORMED_FSTRING,
MP_TOKEN_FSTRING_RAW,
#endif
MP_TOKEN_NEWLINE,
MP_TOKEN_INDENT,
@@ -96,6 +100,7 @@ typedef enum _mp_token_kind_t {
MP_TOKEN_KW_WITH,
MP_TOKEN_KW_YIELD,
MP_TOKEN_OP_ASSIGN,
MP_TOKEN_OP_TILDE,
// Order of these 6 matches corresponding mp_binary_op_t operator
@@ -157,6 +162,9 @@ typedef struct _mp_lexer_t {
mp_reader_t reader; // stream source
unichar chr0, chr1, chr2; // current cached characters from source
#if MICROPY_PY_FSTRINGS
unichar chr0_saved, chr1_saved, chr2_saved; // current cached characters from alt source
#endif
size_t line; // current source line
size_t column; // current source column
@@ -172,6 +180,10 @@ typedef struct _mp_lexer_t {
size_t tok_column; // token source column
mp_token_kind_t tok_kind; // token kind
vstr_t vstr; // token data
#if MICROPY_PY_FSTRINGS
vstr_t fstring_args; // extracted arguments to pass to .format()
size_t fstring_args_idx; // how many bytes of fstring_args have been read
#endif
} mp_lexer_t;
mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader);

View File

@@ -0,0 +1,205 @@
from __future__ import print_function
import collections
import re
import sys
import gzip
import zlib
_COMPRESSED_MARKER = 0xFF
def check_non_ascii(msg):
for c in msg:
if ord(c) >= 0x80:
print(
'Unable to generate compressed data: message "{}" contains a non-ascii character "{}".'.format(
msg, c
),
file=sys.stderr,
)
sys.exit(1)
# Replace <char><space> with <char | 0x80>.
# Trivial scheme to demo/test.
def space_compression(error_strings):
for line in error_strings:
check_non_ascii(line)
result = ""
for i in range(len(line)):
if i > 0 and line[i] == " ":
result = result[:-1]
result += "\\{:03o}".format(ord(line[i - 1]))
else:
result += line[i]
error_strings[line] = result
return None
# Replace common words with <0x80 | index>.
# Index is into a table of words stored as aaaaa<0x80|a>bbb<0x80|b>...
# Replaced words are assumed to have spaces either side to avoid having to store the spaces in the compressed strings.
def word_compression(error_strings):
topn = collections.Counter()
for line in error_strings.keys():
check_non_ascii(line)
for word in line.split(" "):
topn[word] += 1
# Order not just by frequency, but by expected saving. i.e. prefer a longer string that is used less frequently.
# Use the word itself for ties so that compression is deterministic.
def bytes_saved(item):
w, n = item
return -((len(w) + 1) * (n - 1)), w
top128 = sorted(topn.items(), key=bytes_saved)[:128]
index = [w for w, _ in top128]
index_lookup = {w: i for i, w in enumerate(index)}
for line in error_strings.keys():
result = ""
need_space = False
for word in line.split(" "):
if word in index_lookup:
result += "\\{:03o}".format(0b10000000 | index_lookup[word])
need_space = False
else:
if need_space:
result += " "
need_space = True
result += word
error_strings[line] = result.strip()
return "".join(w[:-1] + "\\{:03o}".format(0b10000000 | ord(w[-1])) for w in index)
# Replace chars in text with variable length bit sequence.
# For comparison only (the table is not emitted).
def huffman_compression(error_strings):
# https://github.com/tannewt/huffman
import huffman
all_strings = "".join(error_strings)
cb = huffman.codebook(collections.Counter(all_strings).items())
for line in error_strings:
b = "1"
for c in line:
b += cb[c]
n = len(b)
if n % 8 != 0:
n += 8 - (n % 8)
result = ""
for i in range(0, n, 8):
result += "\\{:03o}".format(int(b[i : i + 8], 2))
if len(result) > len(line) * 4:
result = line
error_strings[line] = result
# TODO: This would be the prefix lengths and the table ordering.
return "_" * (10 + len(cb))
# Replace common N-letter sequences with <0x80 | index>, where
# the common sequences are stored in a separate table.
# This isn't very useful; a smarter way to find the top n-grams is needed.
def ngram_compression(error_strings):
topn = collections.Counter()
N = 2
for line in error_strings.keys():
check_non_ascii(line)
if len(line) < N:
continue
for i in range(0, len(line) - N, N):
topn[line[i : i + N]] += 1
def bytes_saved(item):
w, n = item
return -(len(w) * (n - 1))
top128 = sorted(topn.items(), key=bytes_saved)[:128]
index = [w for w, _ in top128]
index_lookup = {w: i for i, w in enumerate(index)}
for line in error_strings.keys():
result = ""
for i in range(0, len(line) - N + 1, N):
word = line[i : i + N]
if word in index_lookup:
result += "\\{:03o}".format(0b10000000 | index_lookup[word])
else:
result += word
if len(line) % N != 0:
result += line[len(line) - len(line) % N :]
error_strings[line] = result.strip()
return "".join(index)
def main(collected_path, fn):
error_strings = collections.OrderedDict()
max_uncompressed_len = 0
num_uses = 0
# Read in all MP_ERROR_TEXT strings.
with open(collected_path, "r") as f:
for line in f:
line = line.strip()
if not line:
continue
num_uses += 1
error_strings[line] = None
max_uncompressed_len = max(max_uncompressed_len, len(line))
# So that objexcept.c can figure out how big the buffer needs to be.
print("#define MP_MAX_UNCOMPRESSED_TEXT_LEN ({})".format(max_uncompressed_len))
# Run the compression.
compressed_data = fn(error_strings)
# Print the data table.
print('MP_COMPRESSED_DATA("{}")'.format(compressed_data))
# Print the replacements.
for uncomp, comp in error_strings.items():
if uncomp == comp:
prefix = ""
else:
prefix = "\\{:03o}".format(_COMPRESSED_MARKER)
print('MP_MATCH_COMPRESSED("{}", "{}{}")'.format(uncomp, prefix, comp))
# Used to calculate the "true" length of the (escaped) compressed strings.
def unescape(s):
return re.sub(r"\\\d\d\d", "!", s)
# Stats. Note this doesn't include the cost of the decompressor code.
uncomp_len = sum(len(s) + 1 for s in error_strings.keys())
comp_len = sum(1 + len(unescape(s)) + 1 for s in error_strings.values())
data_len = len(compressed_data) + 1 if compressed_data else 0
print("// Total input length: {}".format(uncomp_len))
print("// Total compressed length: {}".format(comp_len))
print("// Total data length: {}".format(data_len))
print("// Predicted saving: {}".format(uncomp_len - comp_len - data_len))
# Somewhat meaningless comparison to zlib/gzip.
all_input_bytes = "\\0".join(error_strings.keys()).encode()
print()
if hasattr(gzip, "compress"):
gzip_len = len(gzip.compress(all_input_bytes)) + num_uses * 4
print("// gzip length: {}".format(gzip_len))
print("// Percentage of gzip: {:.1f}%".format(100 * (comp_len + data_len) / gzip_len))
if hasattr(zlib, "compress"):
zlib_len = len(zlib.compress(all_input_bytes)) + num_uses * 4
print("// zlib length: {}".format(zlib_len))
print("// Percentage of zlib: {:.1f}%".format(100 * (comp_len + data_len) / zlib_len))
if __name__ == "__main__":
main(sys.argv[1], word_compression)

View File

@@ -0,0 +1,110 @@
#!/usr/bin/env python
# This pre-processor parses provided objects' c files for
# MP_REGISTER_MODULE(module_name, obj_module, enabled_define)
# These are used to generate a header with the required entries for
# "mp_rom_map_elem_t mp_builtin_module_table[]" in py/objmodule.c
from __future__ import print_function
import re
import io
import os
import argparse
pattern = re.compile(r"[\n;]\s*MP_REGISTER_MODULE\((.*?),\s*(.*?),\s*(.*?)\);", flags=re.DOTALL)
def find_c_file(obj_file, vpath):
"""Search vpaths for the c file that matches the provided object_file.
:param str obj_file: object file to find the matching c file for
:param List[str] vpath: List of base paths, similar to gcc vpath
:return: str path to c file or None
"""
c_file = None
relative_c_file = os.path.splitext(obj_file)[0] + ".c"
relative_c_file = relative_c_file.lstrip("/\\")
for p in vpath:
possible_c_file = os.path.join(p, relative_c_file)
if os.path.exists(possible_c_file):
c_file = possible_c_file
break
return c_file
def find_module_registrations(c_file):
"""Find any MP_REGISTER_MODULE definitions in the provided c file.
:param str c_file: path to c file to check
:return: List[(module_name, obj_module, enabled_define)]
"""
global pattern
if c_file is None:
# No c file to match the object file, skip
return set()
with io.open(c_file, encoding="utf-8") as c_file_obj:
return set(re.findall(pattern, c_file_obj.read()))
def generate_module_table_header(modules):
"""Generate header with module table entries for builtin modules.
:param List[(module_name, obj_module, enabled_define)] modules: module defs
:return: None
"""
# Print header file for all external modules.
mod_defs = []
print("// Automatically generated by makemoduledefs.py.\n")
for module_name, obj_module, enabled_define in modules:
mod_def = "MODULE_DEF_{}".format(module_name.upper())
mod_defs.append(mod_def)
print(
(
"#if ({enabled_define})\n"
" extern const struct _mp_obj_module_t {obj_module};\n"
" #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
"#else\n"
" #define {mod_def}\n"
"#endif\n"
).format(
module_name=module_name,
obj_module=obj_module,
enabled_define=enabled_define,
mod_def=mod_def,
)
)
print("\n#define MICROPY_REGISTERED_MODULES \\")
for mod_def in mod_defs:
print(" {mod_def} \\".format(mod_def=mod_def))
print("// MICROPY_REGISTERED_MODULES")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vpath", default=".", help="comma separated list of folders to search for c files in"
)
parser.add_argument("files", nargs="*", help="list of c files to search")
args = parser.parse_args()
vpath = [p.strip() for p in args.vpath.split(",")]
modules = set()
for obj_file in args.files:
c_file = find_c_file(obj_file, vpath)
modules |= find_module_registrations(c_file)
generate_module_table_header(sorted(modules))
if __name__ == "__main__":
main()
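For concreteness, a hedged example of what this generator does for a single hypothetical registration, derived from the regex and print template above:

    import re
    pattern = re.compile(r"[\n;]\s*MP_REGISTER_MODULE\((.*?),\s*(.*?),\s*(.*?)\);", flags=re.DOTALL)
    src = ';\nMP_REGISTER_MODULE(MP_QSTR_myport, myport_module, MODULE_MYPORT_ENABLED);\n'
    print(pattern.findall(src))
    # -> [('MP_QSTR_myport', 'myport_module', 'MODULE_MYPORT_ENABLED')]
    # generate_module_table_header() would then emit roughly:
    #   #if (MODULE_MYPORT_ENABLED)
    #       extern const struct _mp_obj_module_t myport_module;
    #       #define MODULE_DEF_MP_QSTR_MYPORT { MP_ROM_QSTR(MP_QSTR_myport), MP_ROM_PTR(&myport_module) },
    #   #else
    #       #define MODULE_DEF_MP_QSTR_MYPORT
    #   #endif
    #   #define MICROPY_REGISTERED_MODULES \
    #       MODULE_DEF_MP_QSTR_MYPORT \
    #   // MICROPY_REGISTERED_MODULES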

View File

@@ -13,49 +13,50 @@ import sys
# - iterating through bytes is different
# - codepoint2name lives in a different module
import platform
if platform.python_version_tuple()[0] == '2':
if platform.python_version_tuple()[0] == "2":
bytes_cons = lambda val, enc=None: bytearray(val)
from htmlentitydefs import codepoint2name
elif platform.python_version_tuple()[0] == '3':
elif platform.python_version_tuple()[0] == "3":
bytes_cons = bytes
from html.entities import codepoint2name
# end compatibility code
codepoint2name[ord('-')] = 'hyphen';
codepoint2name[ord("-")] = "hyphen"
# add some custom names to map characters that aren't in HTML
codepoint2name[ord(' ')] = 'space'
codepoint2name[ord('\'')] = 'squot'
codepoint2name[ord(',')] = 'comma'
codepoint2name[ord('.')] = 'dot'
codepoint2name[ord(':')] = 'colon'
codepoint2name[ord(';')] = 'semicolon'
codepoint2name[ord('/')] = 'slash'
codepoint2name[ord('%')] = 'percent'
codepoint2name[ord('#')] = 'hash'
codepoint2name[ord('(')] = 'paren_open'
codepoint2name[ord(')')] = 'paren_close'
codepoint2name[ord('[')] = 'bracket_open'
codepoint2name[ord(']')] = 'bracket_close'
codepoint2name[ord('{')] = 'brace_open'
codepoint2name[ord('}')] = 'brace_close'
codepoint2name[ord('*')] = 'star'
codepoint2name[ord('!')] = 'bang'
codepoint2name[ord('\\')] = 'backslash'
codepoint2name[ord('+')] = 'plus'
codepoint2name[ord('$')] = 'dollar'
codepoint2name[ord('=')] = 'equals'
codepoint2name[ord('?')] = 'question'
codepoint2name[ord('@')] = 'at_sign'
codepoint2name[ord('^')] = 'caret'
codepoint2name[ord('|')] = 'pipe'
codepoint2name[ord('~')] = 'tilde'
codepoint2name[ord(" ")] = "space"
codepoint2name[ord("'")] = "squot"
codepoint2name[ord(",")] = "comma"
codepoint2name[ord(".")] = "dot"
codepoint2name[ord(":")] = "colon"
codepoint2name[ord(";")] = "semicolon"
codepoint2name[ord("/")] = "slash"
codepoint2name[ord("%")] = "percent"
codepoint2name[ord("#")] = "hash"
codepoint2name[ord("(")] = "paren_open"
codepoint2name[ord(")")] = "paren_close"
codepoint2name[ord("[")] = "bracket_open"
codepoint2name[ord("]")] = "bracket_close"
codepoint2name[ord("{")] = "brace_open"
codepoint2name[ord("}")] = "brace_close"
codepoint2name[ord("*")] = "star"
codepoint2name[ord("!")] = "bang"
codepoint2name[ord("\\")] = "backslash"
codepoint2name[ord("+")] = "plus"
codepoint2name[ord("$")] = "dollar"
codepoint2name[ord("=")] = "equals"
codepoint2name[ord("?")] = "question"
codepoint2name[ord("@")] = "at_sign"
codepoint2name[ord("^")] = "caret"
codepoint2name[ord("|")] = "pipe"
codepoint2name[ord("~")] = "tilde"
# static qstrs, should be sorted
static_qstr_list = [
"",
"__dir__", # Put __dir__ after empty qstr for builtin dir() to work
"__dir__", # Put __dir__ after empty qstr for builtin dir() to work
"\n",
" ",
"*",
@@ -229,15 +230,18 @@ def compute_hash(qstr, bytes_hash):
# Make sure that valid hash is never zero, zero means "hash not computed"
return (hash & ((1 << (8 * bytes_hash)) - 1)) or 1
def qstr_escape(qst):
def esc_char(m):
c = ord(m.group(0))
try:
name = codepoint2name[c]
except KeyError:
name = '0x%02x' % c
return "_" + name + '_'
return re.sub(r'[^A-Za-z0-9_]', esc_char, qst)
name = "0x%02x" % c
return "_" + name + "_"
return re.sub(r"[^A-Za-z0-9_]", esc_char, qst)
def parse_input_headers(infiles):
qcfgs = {}
@@ -257,22 +261,22 @@ def parse_input_headers(infiles):
# read the qstrs in from the input files
for infile in infiles:
with open(infile, 'rt') as f:
with open(infile, "rt") as f:
for line in f:
line = line.strip()
# is this a config line?
match = re.match(r'^QCFG\((.+), (.+)\)', line)
match = re.match(r"^QCFG\((.+), (.+)\)", line)
if match:
value = match.group(2)
if value[0] == '(' and value[-1] == ')':
if value[0] == "(" and value[-1] == ")":
# strip parenthesis from config value
value = value[1:-1]
qcfgs[match.group(1)] = value
continue
# is this a QSTR line?
match = re.match(r'^Q\((.*)\)$', line)
match = re.match(r"^Q\((.*)\)$", line)
if not match:
continue
@@ -280,10 +284,10 @@ def parse_input_headers(infiles):
qstr = match.group(1)
# special cases to specify control characters
if qstr == '\\n':
qstr = '\n'
elif qstr == '\\r\\n':
qstr = '\r\n'
if qstr == "\\n":
qstr = "\n"
elif qstr == "\\r\\n":
qstr = "\r\n"
# work out the corresponding qstr name
ident = qstr_escape(qstr)
@@ -312,43 +316,54 @@ def parse_input_headers(infiles):
return qcfgs, qstrs
def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr):
qbytes = bytes_cons(qstr, 'utf8')
qbytes = bytes_cons(qstr, "utf8")
qlen = len(qbytes)
qhash = compute_hash(qbytes, cfg_bytes_hash)
if all(32 <= ord(c) <= 126 and c != '\\' and c != '"' for c in qstr):
if all(32 <= ord(c) <= 126 and c != "\\" and c != '"' for c in qstr):
# qstr is all printable ASCII so render it as-is (for easier debugging)
qdata = qstr
else:
# qstr contains non-printable codes so render entire thing as hex pairs
qdata = ''.join(('\\x%02x' % b) for b in qbytes)
qdata = "".join(("\\x%02x" % b) for b in qbytes)
if qlen >= (1 << (8 * cfg_bytes_len)):
print('qstr is too long:', qstr)
print("qstr is too long:", qstr)
assert False
qlen_str = ('\\x%02x' * cfg_bytes_len) % tuple(((qlen >> (8 * i)) & 0xff) for i in range(cfg_bytes_len))
qhash_str = ('\\x%02x' * cfg_bytes_hash) % tuple(((qhash >> (8 * i)) & 0xff) for i in range(cfg_bytes_hash))
qlen_str = ("\\x%02x" * cfg_bytes_len) % tuple(
((qlen >> (8 * i)) & 0xFF) for i in range(cfg_bytes_len)
)
qhash_str = ("\\x%02x" * cfg_bytes_hash) % tuple(
((qhash >> (8 * i)) & 0xFF) for i in range(cfg_bytes_hash)
)
return '(const byte*)"%s%s" "%s"' % (qhash_str, qlen_str, qdata)
def print_qstr_data(qcfgs, qstrs):
# get config variables
cfg_bytes_len = int(qcfgs['BYTES_IN_LEN'])
cfg_bytes_hash = int(qcfgs['BYTES_IN_HASH'])
cfg_bytes_len = int(qcfgs["BYTES_IN_LEN"])
cfg_bytes_hash = int(qcfgs["BYTES_IN_HASH"])
# print out the starter of the generated C header file
print('// This file was automatically generated by makeqstrdata.py')
print('')
print("// This file was automatically generated by makeqstrdata.py")
print("")
# add NULL qstr with no hash or data
print('QDEF(MP_QSTRnull, (const byte*)"%s%s" "")' % ('\\x00' * cfg_bytes_hash, '\\x00' * cfg_bytes_len))
print(
'QDEF(MP_QSTRnull, (const byte*)"%s%s" "")'
% ("\\x00" * cfg_bytes_hash, "\\x00" * cfg_bytes_len)
)
# go through each qstr and print it out
for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr)
print('QDEF(MP_QSTR_%s, %s)' % (ident, qbytes))
print("QDEF(MP_QSTR_%s, %s)" % (ident, qbytes))
def do_work(infiles):
qcfgs, qstrs = parse_input_headers(infiles)
print_qstr_data(qcfgs, qstrs)
if __name__ == "__main__":
do_work(sys.argv[1:])
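A hedged illustration of the identifier escaping and the QDEF layout this script produces; the sketch below only adds the two escape-table entries needed for the example, and the hash bytes are left as placeholders because compute_hash's body is not shown in this hunk:

    import re
    from html.entities import codepoint2name
    codepoint2name = dict(codepoint2name)
    codepoint2name[ord(" ")] = "space"
    codepoint2name[ord("!")] = "bang"

    def qstr_escape(qst):
        def esc_char(m):
            c = ord(m.group(0))
            return "_" + codepoint2name.get(c, "0x%02x" % c) + "_"
        return re.sub(r"[^A-Za-z0-9_]", esc_char, qst)

    print(qstr_escape("foo bar!"))   # -> foo_space_bar_bang_
    # The emitted line then looks like (hash/length bytes per BYTES_IN_HASH/BYTES_IN_LEN):
    #   QDEF(MP_QSTR_foo_space_bar_bang_, (const byte*)"<hash bytes><len bytes>" "foo bar!")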

View File

@@ -0,0 +1,210 @@
"""
This script processes the output from the C preprocessor and extracts all
qstrs. Each qstr is transformed into a qstr definition of the form 'Q(...)'.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import io
import os
import re
import subprocess
import sys
import multiprocessing, multiprocessing.dummy
# Extract MP_QSTR_FOO macros.
_MODE_QSTR = "qstr"
# Extract MP_COMPRESSED_ROM_TEXT("") macros. (Which come from MP_ERROR_TEXT)
_MODE_COMPRESS = "compress"
def preprocess():
if any(src in args.dependencies for src in args.changed_sources):
sources = args.sources
elif any(args.changed_sources):
sources = args.changed_sources
else:
sources = args.sources
csources = []
cxxsources = []
for source in sources:
if source.endswith(".cpp"):
cxxsources.append(source)
elif source.endswith(".c"):
csources.append(source)
try:
os.makedirs(os.path.dirname(args.output[0]))
except OSError:
pass
def pp(flags):
def run(files):
return subprocess.check_output(args.pp + flags + files)
return run
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
p = multiprocessing.dummy.Pool(cpus)
with open(args.output[0], "wb") as out_file:
for flags, sources in (
(args.cflags, csources),
(args.cxxflags, cxxsources),
):
batch_size = (len(sources) + cpus - 1) // cpus
chunks = [sources[i : i + batch_size] for i in range(0, len(sources), batch_size or 1)]
for output in p.imap(pp(flags), chunks):
out_file.write(output)
def write_out(fname, output):
if output:
for m, r in [("/", "__"), ("\\", "__"), (":", "@"), ("..", "@@")]:
fname = fname.replace(m, r)
with open(args.output_dir + "/" + fname + "." + args.mode, "w") as f:
f.write("\n".join(output) + "\n")
def process_file(f):
re_line = re.compile(r"#[line]*\s\d+\s\"([^\"]+)\"")
if args.mode == _MODE_QSTR:
re_match = re.compile(r"MP_QSTR_[_a-zA-Z0-9]+")
elif args.mode == _MODE_COMPRESS:
re_match = re.compile(r'MP_COMPRESSED_ROM_TEXT\("([^"]*)"\)')
output = []
last_fname = None
for line in f:
if line.isspace():
continue
# match gcc-like output (# n "file") and msvc-like output (#line n "file")
if line.startswith(("# ", "#line")):
m = re_line.match(line)
assert m is not None
fname = m.group(1)
if os.path.splitext(fname)[1] not in [".c", ".cpp"]:
continue
if fname != last_fname:
write_out(last_fname, output)
output = []
last_fname = fname
continue
for match in re_match.findall(line):
if args.mode == _MODE_QSTR:
name = match.replace("MP_QSTR_", "")
output.append("Q(" + name + ")")
elif args.mode == _MODE_COMPRESS:
output.append(match)
if last_fname:
write_out(last_fname, output)
return ""
def cat_together():
import glob
import hashlib
hasher = hashlib.md5()
all_lines = []
outf = open(args.output_dir + "/out", "wb")
for fname in glob.glob(args.output_dir + "/*." + args.mode):
with open(fname, "rb") as f:
lines = f.readlines()
all_lines += lines
all_lines.sort()
all_lines = b"\n".join(all_lines)
outf.write(all_lines)
outf.close()
hasher.update(all_lines)
new_hash = hasher.hexdigest()
# print(new_hash)
old_hash = None
try:
with open(args.output_file + ".hash") as f:
old_hash = f.read()
except IOError:
pass
mode_full = "QSTR"
if args.mode == _MODE_COMPRESS:
mode_full = "Compressed data"
if old_hash != new_hash:
print(mode_full, "updated")
try:
# rename below might fail if file exists
os.remove(args.output_file)
except:
pass
os.rename(args.output_dir + "/out", args.output_file)
with open(args.output_file + ".hash", "w") as f:
f.write(new_hash)
else:
print(mode_full, "not updated")
if __name__ == "__main__":
if len(sys.argv) < 6:
print("usage: %s command mode input_filename output_dir output_file" % sys.argv[0])
sys.exit(2)
class Args:
pass
args = Args()
args.command = sys.argv[1]
if args.command == "pp":
named_args = {
s: []
for s in [
"pp",
"output",
"cflags",
"cxxflags",
"sources",
"changed_sources",
"dependencies",
]
}
for arg in sys.argv[1:]:
if arg in named_args:
current_tok = arg
else:
named_args[current_tok].append(arg)
if not named_args["pp"] or len(named_args["output"]) != 1:
print("usage: %s %s ..." % (sys.argv[0], " ... ".join(named_args)))
sys.exit(2)
for k, v in named_args.items():
setattr(args, k, v)
preprocess()
sys.exit(0)
args.mode = sys.argv[2]
args.input_filename = sys.argv[3] # Unused for command=cat
args.output_dir = sys.argv[4]
args.output_file = None if len(sys.argv) == 5 else sys.argv[5] # Unused for command=split
if args.mode not in (_MODE_QSTR, _MODE_COMPRESS):
print("error: mode %s unrecognised" % sys.argv[2])
sys.exit(2)
try:
os.makedirs(args.output_dir)
except OSError:
pass
if args.command == "split":
with io.open(args.input_filename, encoding="utf-8") as infile:
process_file(infile)
if args.command == "cat":
cat_together()
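As a hedged sketch of what the split step extracts from a line of preprocessed C (the C calls are only illustrative):

    import re
    re_qstr = re.compile(r"MP_QSTR_[_a-zA-Z0-9]+")
    re_compress = re.compile(r'MP_COMPRESSED_ROM_TEXT\("([^"]*)"\)')

    line = ('mp_obj_t o = mp_load_attr(x, MP_QSTR_append); '
            'mp_raise_msg(&mp_type_ValueError, MP_COMPRESSED_ROM_TEXT("bad value"));')
    print(["Q(" + m.replace("MP_QSTR_", "") + ")" for m in re_qstr.findall(line)])
    # -> ['Q(append)']
    print(re_compress.findall(line))
    # -> ['bad value']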

View File

@@ -0,0 +1,117 @@
"""
Generate header file with macros defining MicroPython version info.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import sys
import os
import datetime
import subprocess
def get_version_info_from_git():
# Python 2.6 doesn't have check_output, so check for that
try:
subprocess.check_output
subprocess.check_call
except AttributeError:
return None
# Note: git describe doesn't work if no tag is available
try:
git_tag = subprocess.check_output(
["git", "describe", "--tags", "--dirty", "--always", "--match", "v[1-9].*"],
stderr=subprocess.STDOUT,
universal_newlines=True,
).strip()
except subprocess.CalledProcessError as er:
if er.returncode == 128:
# git exit code of 128 means no repository found
return None
git_tag = ""
except OSError:
return None
try:
git_hash = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"],
stderr=subprocess.STDOUT,
universal_newlines=True,
).strip()
except subprocess.CalledProcessError:
git_hash = "unknown"
except OSError:
return None
try:
# Check if there are any modified files.
subprocess.check_call(
["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT
)
# Check if there are any staged files.
subprocess.check_call(
["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError:
git_hash += "-dirty"
except OSError:
return None
return git_tag, git_hash
def get_version_info_from_docs_conf():
with open(os.path.join(os.path.dirname(sys.argv[0]), "..", "docs", "conf.py")) as f:
for line in f:
if line.startswith("version = release = '"):
ver = line.strip().split(" = ")[2].strip("'")
git_tag = "v" + ver
return git_tag, "<no hash>"
return None
def make_version_header(filename):
# Get version info using git, with fallback to docs/conf.py
info = get_version_info_from_git()
if info is None:
info = get_version_info_from_docs_conf()
git_tag, git_hash = info
build_date = datetime.date.today()
if "SOURCE_DATE_EPOCH" in os.environ:
build_date = datetime.datetime.utcfromtimestamp(
int(os.environ["SOURCE_DATE_EPOCH"])
).date()
# Generate the file with the git and version info
file_data = """\
// This file was generated by py/makeversionhdr.py
#define MICROPY_GIT_TAG "%s"
#define MICROPY_GIT_HASH "%s"
#define MICROPY_BUILD_DATE "%s"
""" % (
git_tag,
git_hash,
build_date.strftime("%Y-%m-%d"),
)
# Check if the file contents changed from last time
write_file = True
if os.path.isfile(filename):
with open(filename, "r") as f:
existing_data = f.read()
if existing_data == file_data:
write_file = False
# Only write the file if we need to
if write_file:
print("GEN %s" % filename)
with open(filename, "w") as f:
f.write(file_data)
if __name__ == "__main__":
make_version_header(sys.argv[1])
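One detail worth calling out is the SOURCE_DATE_EPOCH handling above, which makes the generated MICROPY_BUILD_DATE reproducible. A small sketch of just that fallback, using a made-up timestamp:
import datetime
import os

def build_date():
    # Prefer SOURCE_DATE_EPOCH (reproducible builds), otherwise today's date,
    # mirroring make_version_header above.
    if "SOURCE_DATE_EPOCH" in os.environ:
        return datetime.datetime.utcfromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"])).date()
    return datetime.date.today()

os.environ["SOURCE_DATE_EPOCH"] = "1639440000"  # hypothetical fixed timestamp
print(build_date().strftime("%Y-%m-%d"))        # -> 2021-12-14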

View File

@@ -87,22 +87,22 @@ void *m_malloc(size_t num_bytes) {
if (ptr == NULL && num_bytes != 0) {
m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
return ptr;
}
void *m_malloc_maybe(size_t num_bytes) {
void *ptr = malloc(num_bytes);
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
return ptr;
}
@@ -113,11 +113,11 @@ void *m_malloc_with_finaliser(size_t num_bytes) {
if (ptr == NULL && num_bytes != 0) {
m_malloc_fail(num_bytes);
}
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
MP_STATE_MEM(total_bytes_allocated) += num_bytes;
MP_STATE_MEM(current_bytes_allocated) += num_bytes;
UPDATE_PEAK();
#endif
#endif
DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
return ptr;
}
@@ -133,15 +133,16 @@ void *m_malloc0(size_t num_bytes) {
}
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes)
#else
void *m_realloc(void *ptr, size_t new_num_bytes) {
void *m_realloc(void *ptr, size_t new_num_bytes)
#endif
{
void *new_ptr = realloc(ptr, new_num_bytes);
if (new_ptr == NULL && new_num_bytes != 0) {
m_malloc_fail(new_num_bytes);
}
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
// At first thought, "Total bytes allocated" should only grow,
// after all, it's *total*. But consider for example 2K block
// shrunk to 1K and then grown to 2K again. It's still 2K
@@ -151,7 +152,7 @@ void *m_realloc(void *ptr, size_t new_num_bytes) {
MP_STATE_MEM(total_bytes_allocated) += diff;
MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
#endif
#endif
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
#else
@@ -161,12 +162,13 @@ void *m_realloc(void *ptr, size_t new_num_bytes) {
}
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move)
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move)
#endif
{
void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
// At first thought, "Total bytes allocated" should only grow,
// after all, it's *total*. But consider for example 2K block
// shrunk to 1K and then grown to 2K again. It's still 2K
@@ -179,7 +181,7 @@ void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
MP_STATE_MEM(current_bytes_allocated) += diff;
UPDATE_PEAK();
}
#endif
#endif
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
#else
@@ -189,14 +191,15 @@ void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
}
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void m_free(void *ptr, size_t num_bytes) {
void m_free(void *ptr, size_t num_bytes)
#else
void m_free(void *ptr) {
void m_free(void *ptr)
#endif
{
free(ptr);
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
#endif
#endif
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
DEBUG_printf("free %p, %d\n", ptr, num_bytes);
#else
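The comment inside m_realloc above argues that the stats must be updated by the signed difference, not only by positive growth. A toy model of the three counters makes the arithmetic concrete (a sketch only, not the C implementation):
# Toy model of the MICROPY_MEM_STATS counters with the signed-diff update.
total = current = peak = 0

def account_realloc(old, new):
    global total, current, peak
    diff = new - old              # negative when the block shrinks
    total += diff
    current += diff
    peak = max(peak, current)

account_realloc(0, 2048)          # malloc 2K
account_realloc(2048, 1024)       # shrink to 1K
account_realloc(1024, 2048)       # grow back to 2K
print(total, current, peak)       # -> 2048 2048 2048 (not 3072)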

View File

@@ -40,17 +40,6 @@
#define DEBUG_printf(...) (void)0
#endif
// Fixed empty map. Useful when we need to call kw-receiving functions
// without any keywords from C, etc.
const mp_map_t mp_const_empty_map = {
.all_keys_are_qstrs = 0,
.is_fixed = 1,
.is_ordered = 1,
.used = 0,
.alloc = 0,
.table = NULL,
};
// This table of sizes is used to control the growth of hash tables.
// The first set of sizes are chosen so the allocation fits exactly in a
// 4-word GC block, and it's not so important for these small values to be
@@ -96,7 +85,7 @@ void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table) {
map->all_keys_are_qstrs = 1;
map->is_fixed = 1;
map->is_ordered = 1;
map->table = (mp_map_elem_t*)table;
map->table = (mp_map_elem_t *)table;
}
// Differentiate from mp_map_clear() - semantics is different
@@ -177,6 +166,7 @@ mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t
--map->used;
memmove(elem, elem + 1, (top - elem - 1) * sizeof(*elem));
// put the found element after the end so the caller can access it if needed
// note: caller must NULL the value so the GC can clean up (e.g. see dict_get_helper).
elem = &map->table[map->used];
elem->key = MP_OBJ_NULL;
elem->value = value;
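The removal path above shifts the remaining ordered-map entries down and parks the removed key/value pair in the slot just past `used`, so the caller can still read it before clearing the value for the GC (the point of the new comment). A rough Python model of that shuffle, assuming a plain list of (key, value) slots:
def ordered_map_remove(table, used, idx):
    removed = table[idx]
    # Shift the tail down over the removed slot (the memmove above).
    table[idx:used - 1] = table[idx + 1:used]
    used -= 1
    # Park the removed entry just past the live region; the C caller must
    # then NULL the value so the GC can reclaim it.
    table[used] = removed
    return used

table = [("a", 1), ("b", 2), ("c", 3), (None, None)]
used = ordered_map_remove(table, 3, 1)
print(used, table)  # -> 2 [('a', 1), ('c', 3), ('b', 2), (None, None)]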

View File

@@ -53,32 +53,36 @@ typedef unsigned int uint;
// Static assertion macro
#define MP_STATIC_ASSERT(cond) ((void)sizeof(char[1 - 2 * !(cond)]))
// Round-up integer division
#define MP_CEIL_DIVIDE(a, b) (((a) + (b) - 1) / (b))
#define MP_ROUND_DIVIDE(a, b) (((a) + (b) / 2) / (b))
/** memory allocation ******************************************/
// TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)
#define m_new(type, num) ((type*)(m_malloc(sizeof(type) * (num))))
#define m_new_maybe(type, num) ((type*)(m_malloc_maybe(sizeof(type) * (num))))
#define m_new0(type, num) ((type*)(m_malloc0(sizeof(type) * (num))))
#define m_new(type, num) ((type *)(m_malloc(sizeof(type) * (num))))
#define m_new_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num))))
#define m_new0(type, num) ((type *)(m_malloc0(sizeof(type) * (num))))
#define m_new_obj(type) (m_new(type, 1))
#define m_new_obj_maybe(type) (m_new_maybe(type, 1))
#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type*)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type*)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type *)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num)))
#if MICROPY_ENABLE_FINALISER
#define m_new_obj_with_finaliser(type) ((type*)(m_malloc_with_finaliser(sizeof(type))))
#define m_new_obj_var_with_finaliser(type, var_type, var_num) ((type*)m_malloc_with_finaliser(sizeof(type) + sizeof(var_type) * (var_num)))
#define m_new_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type))))
#define m_new_obj_var_with_finaliser(type, var_type, var_num) ((type *)m_malloc_with_finaliser(sizeof(type) + sizeof(var_type) * (var_num)))
#else
#define m_new_obj_with_finaliser(type) m_new_obj(type)
#define m_new_obj_var_with_finaliser(type, var_type, var_num) m_new_obj_var(type, var_type, var_num)
#endif
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
#define m_renew(type, ptr, old_num, new_num) ((type*)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type*)(m_realloc_maybe((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num), (allow_move))))
#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num), (allow_move))))
#define m_del(type, ptr, num) m_free(ptr, sizeof(type) * (num))
#define m_del_var(obj_type, var_type, var_num, ptr) (m_free(ptr, sizeof(obj_type) + sizeof(var_type) * (var_num)))
#else
#define m_renew(type, ptr, old_num, new_num) ((type*)(m_realloc((ptr), sizeof(type) * (new_num))))
#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type*)(m_realloc_maybe((ptr), sizeof(type) * (new_num), (allow_move))))
#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (new_num))))
#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (new_num), (allow_move))))
#define m_del(type, ptr, num) ((void)(num), m_free(ptr))
#define m_del_var(obj_type, var_type, var_num, ptr) ((void)(var_num), m_free(ptr))
#endif
@@ -111,7 +115,7 @@ size_t m_get_peak_bytes_allocated(void);
#define MP_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
// align ptr to the nearest multiple of "alignment"
#define MP_ALIGN(ptr, alignment) (void*)(((uintptr_t)(ptr) + ((alignment) - 1)) & ~((alignment) - 1))
#define MP_ALIGN(ptr, alignment) (void *)(((uintptr_t)(ptr) + ((alignment) - 1)) & ~((alignment) - 1))
/** unichar / UTF-8 *********************************************/
@@ -129,9 +133,16 @@ unichar utf8_get_char(const byte *s);
const byte *utf8_next_char(const byte *s);
size_t utf8_charlen(const byte *str, size_t len);
#else
static inline unichar utf8_get_char(const byte *s) { return *s; }
static inline const byte *utf8_next_char(const byte *s) { return s + 1; }
static inline size_t utf8_charlen(const byte *str, size_t len) { (void)str; return len; }
static inline unichar utf8_get_char(const byte *s) {
return *s;
}
static inline const byte *utf8_next_char(const byte *s) {
return s + 1;
}
static inline size_t utf8_charlen(const byte *str, size_t len) {
(void)str;
return len;
}
#endif
bool unichar_isspace(unichar c);
@@ -140,6 +151,7 @@ bool unichar_isprint(unichar c);
bool unichar_isdigit(unichar c);
bool unichar_isxdigit(unichar c);
bool unichar_isident(unichar c);
bool unichar_isalnum(unichar c);
bool unichar_isupper(unichar c);
bool unichar_islower(unichar c);
unichar unichar_tolower(unichar c);
@@ -168,9 +180,15 @@ void vstr_init_print(vstr_t *vstr, size_t alloc, struct _mp_print_t *print);
void vstr_clear(vstr_t *vstr);
vstr_t *vstr_new(size_t alloc);
void vstr_free(vstr_t *vstr);
static inline void vstr_reset(vstr_t *vstr) { vstr->len = 0; }
static inline char *vstr_str(vstr_t *vstr) { return vstr->buf; }
static inline size_t vstr_len(vstr_t *vstr) { return vstr->len; }
static inline void vstr_reset(vstr_t *vstr) {
vstr->len = 0;
}
static inline char *vstr_str(vstr_t *vstr) {
return vstr->buf;
}
static inline size_t vstr_len(vstr_t *vstr) {
return vstr->len;
}
void vstr_hint_size(vstr_t *vstr, size_t size);
char *vstr_extend(vstr_t *vstr, size_t size);
char *vstr_add_len(vstr_t *vstr, size_t len);
@@ -191,10 +209,10 @@ void vstr_printf(vstr_t *vstr, const char *fmt, ...);
#define CHECKBUF(buf, max_size) char buf[max_size + 1]; size_t buf##_len = max_size; char *buf##_p = buf;
#define CHECKBUF_RESET(buf, max_size) buf##_len = max_size; buf##_p = buf;
#define CHECKBUF_APPEND(buf, src, src_len) \
{ size_t l = MIN(src_len, buf##_len); \
memcpy(buf##_p, src, l); \
buf##_len -= l; \
buf##_p += l; }
{ size_t l = MIN(src_len, buf##_len); \
memcpy(buf##_p, src, l); \
buf##_len -= l; \
buf##_p += l; }
#define CHECKBUF_APPEND_0(buf) { *buf##_p = 0; }
#define CHECKBUF_LEN(buf) (buf##_p - buf)
@@ -210,14 +228,96 @@ extern mp_uint_t mp_verbose_flag;
/** float internals *************/
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
#define MP_FLOAT_EXP_BITS (11)
#define MP_FLOAT_FRAC_BITS (52)
typedef uint64_t mp_float_uint_t;
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
#define MP_FLOAT_EXP_BITS (8)
#define MP_FLOAT_FRAC_BITS (23)
typedef uint32_t mp_float_uint_t;
#endif
#define MP_FLOAT_EXP_BIAS ((1 << (MP_FLOAT_EXP_BITS - 1)) - 1)
typedef union _mp_float_union_t {
mp_float_t f;
#if MP_ENDIANNESS_LITTLE
struct {
mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
mp_float_uint_t sgn : 1;
} p;
#else
struct {
mp_float_uint_t sgn : 1;
mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
} p;
#endif
mp_float_uint_t i;
} mp_float_union_t;
#endif // MICROPY_PY_BUILTINS_FLOAT
/** ROM string compression *************/
#if MICROPY_ROM_TEXT_COMPRESSION
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
#error "MICROPY_ERROR_REPORTING_NONE requires MICROPY_ROM_TEXT_COMPRESSION disabled"
#endif
#ifdef NO_QSTR
// Compression enabled but doing QSTR extraction.
// So leave MP_COMPRESSED_ROM_TEXT in place for makeqstrdefs.py / makecompresseddata.py to find them.
#else
// Compression enabled and doing a regular build.
// Map MP_COMPRESSED_ROM_TEXT to the compressed strings.
// Force usage of the MP_ERROR_TEXT macro by requiring an opaque type.
typedef struct {
#ifdef __clang__
// Fix "error: empty struct has size 0 in C, size 1 in C++".
char dummy;
#endif
} *mp_rom_error_text_t;
#include <string.h>
inline __attribute__((always_inline)) const char *MP_COMPRESSED_ROM_TEXT(const char *msg) {
// "genhdr/compressed.data.h" contains an invocation of the MP_MATCH_COMPRESSED macro for each compressed string.
// The giant if(strcmp) tree is optimized by the compiler, which turns this into a direct return of the compressed data.
#define MP_MATCH_COMPRESSED(a, b) if (strcmp(msg, a) == 0) { return b; } else
// It also contains a single invocation of the MP_COMPRESSED_DATA macro, we don't need that here.
#define MP_COMPRESSED_DATA(x)
#include "genhdr/compressed.data.h"
#undef MP_COMPRESSED_DATA
#undef MP_MATCH_COMPRESSED
return msg;
}
#endif
#else
// Compression not enabled, just make it a no-op.
typedef const char *mp_rom_error_text_t;
#define MP_COMPRESSED_ROM_TEXT(x) x
#endif // MICROPY_ROM_TEXT_COMPRESSION
// Might add more types of compressed text in the future.
// For now, forward directly to MP_COMPRESSED_ROM_TEXT.
#define MP_ERROR_TEXT(x) (mp_rom_error_text_t)MP_COMPRESSED_ROM_TEXT(x)
#endif // MICROPY_INCLUDED_PY_MISC_H
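The mp_float_union_t bit-fields above simply name the IEEE-754 components. For the MICROPY_FLOAT_IMPL_DOUBLE case (11 exponent bits, 52 fraction bits, bias 1023) the same split can be reproduced in Python, which may help when reading code that uses the .p view:
import struct

MP_FLOAT_EXP_BITS = 11
MP_FLOAT_FRAC_BITS = 52
MP_FLOAT_EXP_BIAS = (1 << (MP_FLOAT_EXP_BITS - 1)) - 1  # 1023

def float_fields(x):
    # Reinterpret the double as a 64-bit integer (the union's .i view)
    # and slice it into sign / exponent / fraction.
    (i,) = struct.unpack("<Q", struct.pack("<d", x))
    frc = i & ((1 << MP_FLOAT_FRAC_BITS) - 1)
    exp = (i >> MP_FLOAT_FRAC_BITS) & ((1 << MP_FLOAT_EXP_BITS) - 1)
    sgn = i >> (MP_FLOAT_FRAC_BITS + MP_FLOAT_EXP_BITS)
    return sgn, exp, frc

print(float_fields(1.0))   # -> (0, 1023, 0): stored exponent equals the bias
print(float_fields(-2.0))  # -> (1, 1024, 0)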

python/src/py/mkenv.mk
View File

@@ -0,0 +1,72 @@
ifneq ($(lastword a b),b)
$(error These Makefiles require make 3.81 or newer)
endif
# Set TOP to be the path to get from the current directory (where make was
# invoked) to the top of the tree. $(lastword $(MAKEFILE_LIST)) returns
# the name of this makefile relative to where make was invoked.
#
# We assume that this file is in the py directory so we use $(dir ) twice
# to get to the top of the tree.
THIS_MAKEFILE := $(lastword $(MAKEFILE_LIST))
TOP := $(patsubst %/py/mkenv.mk,%,$(THIS_MAKEFILE))
# Turn on increased build verbosity by defining BUILD_VERBOSE in your main
# Makefile or in your environment. You can also use V=1 on the make command
# line.
ifeq ("$(origin V)", "command line")
BUILD_VERBOSE=$(V)
endif
ifndef BUILD_VERBOSE
$(info Use make V=1 or set BUILD_VERBOSE in your environment to increase build verbosity.)
BUILD_VERBOSE = 0
endif
ifeq ($(BUILD_VERBOSE),0)
Q = @
else
Q =
endif
# default settings; can be overridden in main Makefile
PY_SRC ?= $(TOP)/py
BUILD ?= build
RM = rm
ECHO = @echo
CP = cp
MKDIR = mkdir
SED = sed
CAT = cat
TOUCH = touch
PYTHON = python3
AS = $(CROSS_COMPILE)as
CC = $(CROSS_COMPILE)gcc
CXX = $(CROSS_COMPILE)g++
GDB = $(CROSS_COMPILE)gdb
LD = $(CROSS_COMPILE)ld
OBJCOPY = $(CROSS_COMPILE)objcopy
SIZE = $(CROSS_COMPILE)size
STRIP = $(CROSS_COMPILE)strip
AR = $(CROSS_COMPILE)ar
MAKE_MANIFEST = $(PYTHON) $(TOP)/tools/makemanifest.py
MAKE_FROZEN = $(PYTHON) $(TOP)/tools/make-frozen.py
MPY_TOOL = $(PYTHON) $(TOP)/tools/mpy-tool.py
MPY_LIB_DIR = $(TOP)/../micropython-lib
ifeq ($(MICROPY_MPYCROSS),)
MICROPY_MPYCROSS = $(TOP)/mpy-cross/mpy-cross
MICROPY_MPYCROSS_DEPENDENCY = $(MICROPY_MPYCROSS)
endif
all:
.PHONY: all
.DELETE_ON_ERROR:
MKENV_INCLUDED = 1
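The TOP computation above just strips the known /py/mkenv.mk suffix from this makefile's own path so everything else can be written relative to the tree root. The equivalent string manipulation, sketched in Python with a made-up path:
# Hypothetical value of $(lastword $(MAKEFILE_LIST)).
this_makefile = "python/src/py/mkenv.mk"
suffix = "/py/mkenv.mk"
# $(patsubst %/py/mkenv.mk,%,$(THIS_MAKEFILE)): drop the trailing /py/mkenv.mk.
top = this_makefile[:-len(suffix)] if this_makefile.endswith(suffix) else this_makefile
print(top)  # -> python/src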

python/src/py/mkrules.cmake
View File

@@ -0,0 +1,155 @@
# CMake fragment for MicroPython rules
set(MICROPY_GENHDR_DIR "${CMAKE_BINARY_DIR}/genhdr")
set(MICROPY_MPVERSION "${MICROPY_GENHDR_DIR}/mpversion.h")
set(MICROPY_MODULEDEFS "${MICROPY_GENHDR_DIR}/moduledefs.h")
set(MICROPY_QSTRDEFS_PY "${MICROPY_PY_DIR}/qstrdefs.h")
set(MICROPY_QSTRDEFS_LAST "${MICROPY_GENHDR_DIR}/qstr.i.last")
set(MICROPY_QSTRDEFS_SPLIT "${MICROPY_GENHDR_DIR}/qstr.split")
set(MICROPY_QSTRDEFS_COLLECTED "${MICROPY_GENHDR_DIR}/qstrdefs.collected.h")
set(MICROPY_QSTRDEFS_PREPROCESSED "${MICROPY_GENHDR_DIR}/qstrdefs.preprocessed.h")
set(MICROPY_QSTRDEFS_GENERATED "${MICROPY_GENHDR_DIR}/qstrdefs.generated.h")
# Provide defaults for preprocessor flags if not already defined
if(NOT MICROPY_CPP_FLAGS)
get_target_property(MICROPY_CPP_INC ${MICROPY_TARGET} INCLUDE_DIRECTORIES)
get_target_property(MICROPY_CPP_DEF ${MICROPY_TARGET} COMPILE_DEFINITIONS)
endif()
# Compute MICROPY_CPP_FLAGS for preprocessor
list(APPEND MICROPY_CPP_INC ${MICROPY_CPP_INC_EXTRA})
list(APPEND MICROPY_CPP_DEF ${MICROPY_CPP_DEF_EXTRA})
set(_prefix "-I")
foreach(_arg ${MICROPY_CPP_INC})
list(APPEND MICROPY_CPP_FLAGS ${_prefix}${_arg})
endforeach()
set(_prefix "-D")
foreach(_arg ${MICROPY_CPP_DEF})
list(APPEND MICROPY_CPP_FLAGS ${_prefix}${_arg})
endforeach()
list(APPEND MICROPY_CPP_FLAGS ${MICROPY_CPP_FLAGS_EXTRA})
find_package(Python3 REQUIRED COMPONENTS Interpreter)
target_sources(${MICROPY_TARGET} PRIVATE
${MICROPY_MPVERSION}
${MICROPY_QSTRDEFS_GENERATED}
)
# Command to force the build of another command
add_custom_command(
OUTPUT MICROPY_FORCE_BUILD
COMMENT ""
COMMAND echo -n
)
# Generate mpversion.h
add_custom_command(
OUTPUT ${MICROPY_MPVERSION}
COMMAND ${CMAKE_COMMAND} -E make_directory ${MICROPY_GENHDR_DIR}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_DIR}/py/makeversionhdr.py ${MICROPY_MPVERSION}
DEPENDS MICROPY_FORCE_BUILD
)
# Generate moduledefs.h
add_custom_command(
OUTPUT ${MICROPY_MODULEDEFS}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makemoduledefs.py --vpath="/" ${MICROPY_SOURCE_QSTR} > ${MICROPY_MODULEDEFS}
DEPENDS ${MICROPY_MPVERSION}
${MICROPY_SOURCE_QSTR}
)
# Generate qstrs
# If any of the dependencies in this rule change then the C-preprocessor step must be run.
# It only needs to be passed the list of MICROPY_SOURCE_QSTR files that have changed since
# it was last run, but it looks like it's not possible to specify that with cmake.
add_custom_command(
OUTPUT ${MICROPY_QSTRDEFS_LAST}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py pp ${CMAKE_C_COMPILER} -E output ${MICROPY_GENHDR_DIR}/qstr.i.last cflags ${MICROPY_CPP_FLAGS} -DNO_QSTR cxxflags ${MICROPY_CPP_FLAGS} -DNO_QSTR sources ${MICROPY_SOURCE_QSTR}
DEPENDS ${MICROPY_MODULEDEFS}
${MICROPY_SOURCE_QSTR}
VERBATIM
COMMAND_EXPAND_LISTS
)
add_custom_command(
OUTPUT ${MICROPY_QSTRDEFS_SPLIT}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py split qstr ${MICROPY_GENHDR_DIR}/qstr.i.last ${MICROPY_GENHDR_DIR}/qstr _
COMMAND touch ${MICROPY_QSTRDEFS_SPLIT}
DEPENDS ${MICROPY_QSTRDEFS_LAST}
VERBATIM
COMMAND_EXPAND_LISTS
)
add_custom_command(
OUTPUT ${MICROPY_QSTRDEFS_COLLECTED}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py cat qstr _ ${MICROPY_GENHDR_DIR}/qstr ${MICROPY_QSTRDEFS_COLLECTED}
DEPENDS ${MICROPY_QSTRDEFS_SPLIT}
VERBATIM
COMMAND_EXPAND_LISTS
)
add_custom_command(
OUTPUT ${MICROPY_QSTRDEFS_PREPROCESSED}
COMMAND cat ${MICROPY_QSTRDEFS_PY} ${MICROPY_QSTRDEFS_PORT} ${MICROPY_QSTRDEFS_COLLECTED} | sed "s/^Q(.*)/\"&\"/" | ${CMAKE_C_COMPILER} -E ${MICROPY_CPP_FLAGS} - | sed "s/^\\\"\\(Q(.*)\\)\\\"/\\1/" > ${MICROPY_QSTRDEFS_PREPROCESSED}
DEPENDS ${MICROPY_QSTRDEFS_PY}
${MICROPY_QSTRDEFS_PORT}
${MICROPY_QSTRDEFS_COLLECTED}
VERBATIM
COMMAND_EXPAND_LISTS
)
add_custom_command(
OUTPUT ${MICROPY_QSTRDEFS_GENERATED}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdata.py ${MICROPY_QSTRDEFS_PREPROCESSED} > ${MICROPY_QSTRDEFS_GENERATED}
DEPENDS ${MICROPY_QSTRDEFS_PREPROCESSED}
VERBATIM
COMMAND_EXPAND_LISTS
)
# Build frozen code if enabled
if(MICROPY_FROZEN_MANIFEST)
set(MICROPY_FROZEN_CONTENT "${CMAKE_BINARY_DIR}/frozen_content.c")
target_sources(${MICROPY_TARGET} PRIVATE
${MICROPY_FROZEN_CONTENT}
)
target_compile_definitions(${MICROPY_TARGET} PUBLIC
MICROPY_QSTR_EXTRA_POOL=mp_qstr_frozen_const_pool
MICROPY_MODULE_FROZEN_MPY=\(1\)
)
if(NOT MICROPY_LIB_DIR)
set(MICROPY_LIB_DIR ${MICROPY_DIR}/../micropython-lib)
endif()
# If MICROPY_MPYCROSS is not explicitly defined in the environment (which
# is what makemanifest.py will use) then create an mpy-cross dependency
# to automatically build mpy-cross if needed.
set(MICROPY_MPYCROSS $ENV{MICROPY_MPYCROSS})
if(NOT MICROPY_MPYCROSS)
set(MICROPY_MPYCROSS_DEPENDENCY ${MICROPY_DIR}/mpy-cross/mpy-cross)
if(NOT MICROPY_MAKE_EXECUTABLE)
set(MICROPY_MAKE_EXECUTABLE make)
endif()
add_custom_command(
OUTPUT ${MICROPY_MPYCROSS_DEPENDENCY}
COMMAND ${MICROPY_MAKE_EXECUTABLE} -C ${MICROPY_DIR}/mpy-cross
)
endif()
add_custom_command(
OUTPUT ${MICROPY_FROZEN_CONTENT}
COMMAND ${Python3_EXECUTABLE} ${MICROPY_DIR}/tools/makemanifest.py -o ${MICROPY_FROZEN_CONTENT} -v "MPY_DIR=${MICROPY_DIR}" -v "MPY_LIB_DIR=${MICROPY_LIB_DIR}" -v "PORT_DIR=${MICROPY_PORT_DIR}" -v "BOARD_DIR=${MICROPY_BOARD_DIR}" -b "${CMAKE_BINARY_DIR}" -f${MICROPY_CROSS_FLAGS} ${MICROPY_FROZEN_MANIFEST}
DEPENDS MICROPY_FORCE_BUILD
${MICROPY_QSTRDEFS_GENERATED}
${MICROPY_MPYCROSS_DEPENDENCY}
VERBATIM
)
endif()
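The qstrdefs.preprocessed.h command above protects each Q(...) line by wrapping it in double quotes before running the C preprocessor (so the line survives as a string literal while surrounding #if blocks are still resolved), then strips the quotes afterwards. A small Python sketch of those two text transforms, treating the preprocessor itself as a black box:
import re

def protect_qstrs(text):
    # sed "s/^Q(.*)/\"&\"/": quote Q(...) lines before running cpp.
    return re.sub(r"^(Q\(.*\))", r'"\1"', text, flags=re.MULTILINE)

def unprotect_qstrs(text):
    # sed "s/^\"\(Q(.*)\)\"/\1/": remove the quotes after cpp has run.
    return re.sub(r'^"(Q\(.*\))"', r"\1", text, flags=re.MULTILINE)

source = "Q(append)\nQ(__init__)\n"
protected = protect_qstrs(source)
print(protected)                              # "Q(append)" ... passes through cpp untouched
print(unprotect_qstrs(protected) == source)   # -> True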

python/src/py/mkrules.mk
View File

@@ -0,0 +1,270 @@
ifneq ($(MKENV_INCLUDED),1)
# We assume that mkenv is in the same directory as this file.
THIS_MAKEFILE = $(lastword $(MAKEFILE_LIST))
include $(dir $(THIS_MAKEFILE))mkenv.mk
endif
# Extra deps that need to happen before object compilation.
OBJ_EXTRA_ORDER_DEPS =
ifeq ($(MICROPY_ROM_TEXT_COMPRESSION),1)
# If compression is enabled, trigger the build of compressed.data.h...
OBJ_EXTRA_ORDER_DEPS += $(HEADER_BUILD)/compressed.data.h
# ...and enable the MP_COMPRESSED_ROM_TEXT macro (used by MP_ERROR_TEXT).
CFLAGS += -DMICROPY_ROM_TEXT_COMPRESSION=1
endif
# QSTR generation uses the same CFLAGS, with these modifications.
QSTR_GEN_FLAGS = -DNO_QSTR
# Note: := to force evaluation immediately.
QSTR_GEN_CFLAGS := $(CFLAGS)
QSTR_GEN_CFLAGS += $(QSTR_GEN_FLAGS)
QSTR_GEN_CXXFLAGS := $(CXXFLAGS)
QSTR_GEN_CXXFLAGS += $(QSTR_GEN_FLAGS)
# This file expects that OBJ contains a list of all of the object files.
# The directory portion of each object file is used to locate the source
# and should not contain any ..'s but rather be relative to the top of the
# tree.
#
# So for example, py/map.c would have an object file name py/map.o
# The object files will go into the build directory and maintain the same
# directory structure as the source tree. So the final dependency will look
# like this:
#
# build/py/map.o: py/map.c
#
# We set vpath to point to the top of the tree so that the source files
# can be located. By following this scheme, it allows a single build rule
# to be used to compile all .c files.
vpath %.S . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.S
$(ECHO) "CC $<"
$(Q)$(CC) $(CFLAGS) -c -o $@ $<
vpath %.s . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.s
$(ECHO) "AS $<"
$(Q)$(AS) -o $@ $<
define compile_c
$(ECHO) "CC $<"
$(Q)$(CC) $(CFLAGS) -c -MD -o $@ $<
@# The following fixes the dependency file.
@# See http://make.paulandlesley.org/autodep.html for details.
@# Regex adjusted from the above to play better with Windows paths, etc.
@$(CP) $(@:.o=.d) $(@:.o=.P); \
$(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
$(RM) -f $(@:.o=.d)
endef
define compile_cxx
$(ECHO) "CXX $<"
$(Q)$(CXX) $(CXXFLAGS) -c -MD -o $@ $<
@# The following fixes the dependency file.
@# See http://make.paulandlesley.org/autodep.html for details.
@# Regex adjusted from the above to play better with Windows paths, etc.
@$(CP) $(@:.o=.d) $(@:.o=.P); \
$(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
-e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
$(RM) -f $(@:.o=.d)
endef
vpath %.c . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.c
$(call compile_c)
vpath %.cpp . $(TOP) $(USER_C_MODULES)
$(BUILD)/%.o: %.cpp
$(call compile_cxx)
$(BUILD)/%.pp: %.c
$(ECHO) "PreProcess $<"
$(Q)$(CPP) $(CFLAGS) -Wp,-C,-dD,-dI -o $@ $<
# The following rule uses | to create an order only prerequisite. Order only
# prerequisites only get built if they don't exist. They don't cause timestamp
# checking to be performed.
#
# We don't know which source files actually need the generated.h (since
# it is #included from str.h). The compiler generated dependencies will cause
# the right .o's to get recompiled if the generated.h file changes. Adding
# an order-only dependency to all of the .o's will cause the generated .h
# to get built before we try to compile any of them.
$(OBJ): | $(HEADER_BUILD)/qstrdefs.generated.h $(HEADER_BUILD)/mpversion.h $(OBJ_EXTRA_ORDER_DEPS)
# The logic for qstr regeneration (applied by makeqstrdefs.py) is:
# - if anything in QSTR_GLOBAL_DEPENDENCIES is newer, then process all source files ($^)
# - else, if list of newer prerequisites ($?) is not empty, then process just these ($?)
# - else, process all source files ($^) [this covers "make -B" which can set $? to empty]
# See more information about this process in docs/develop/qstr.rst.
$(HEADER_BUILD)/qstr.i.last: $(SRC_QSTR) $(QSTR_GLOBAL_DEPENDENCIES) $(HEADER_BUILD)/moduledefs.h | $(QSTR_GLOBAL_REQUIREMENTS)
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py pp $(CPP) output $(HEADER_BUILD)/qstr.i.last cflags $(QSTR_GEN_CFLAGS) cxxflags $(QSTR_GEN_CXXFLAGS) sources $^ dependencies $(QSTR_GLOBAL_DEPENDENCIES) changed_sources $?
$(HEADER_BUILD)/qstr.split: $(HEADER_BUILD)/qstr.i.last
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py split qstr $< $(HEADER_BUILD)/qstr _
$(Q)$(TOUCH) $@
$(QSTR_DEFS_COLLECTED): $(HEADER_BUILD)/qstr.split
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py cat qstr _ $(HEADER_BUILD)/qstr $@
# Compressed error strings.
$(HEADER_BUILD)/compressed.split: $(HEADER_BUILD)/qstr.i.last
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py split compress $< $(HEADER_BUILD)/compress _
$(Q)$(TOUCH) $@
$(HEADER_BUILD)/compressed.collected: $(HEADER_BUILD)/compressed.split
$(ECHO) "GEN $@"
$(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py cat compress _ $(HEADER_BUILD)/compress $@
# $(sort $(var)) removes duplicates
#
# The net effect of this, is it causes the objects to depend on the
# object directories (but only for existence), and the object directories
# will be created if they don't exist.
OBJ_DIRS = $(sort $(dir $(OBJ)))
$(OBJ): | $(OBJ_DIRS)
$(OBJ_DIRS):
$(MKDIR) -p $@
$(HEADER_BUILD):
$(MKDIR) -p $@
ifneq ($(MICROPY_MPYCROSS_DEPENDENCY),)
# to automatically build mpy-cross, if needed
$(MICROPY_MPYCROSS_DEPENDENCY):
$(MAKE) -C $(dir $@)
endif
ifneq ($(FROZEN_MANIFEST),)
# to build frozen_content.c from a manifest
$(BUILD)/frozen_content.c: FORCE $(BUILD)/genhdr/qstrdefs.generated.h | $(MICROPY_MPYCROSS_DEPENDENCY)
$(Q)$(MAKE_MANIFEST) -o $@ -v "MPY_DIR=$(TOP)" -v "MPY_LIB_DIR=$(MPY_LIB_DIR)" -v "PORT_DIR=$(shell pwd)" -v "BOARD_DIR=$(BOARD_DIR)" -b "$(BUILD)" $(if $(MPY_CROSS_FLAGS),-f"$(MPY_CROSS_FLAGS)",) --mpy-tool-flags="$(MPY_TOOL_FLAGS)" $(FROZEN_MANIFEST)
ifneq ($(FROZEN_DIR),)
$(error FROZEN_DIR cannot be used in conjunction with FROZEN_MANIFEST)
endif
ifneq ($(FROZEN_MPY_DIR),)
$(error FROZEN_MPY_DIR cannot be used in conjunction with FROZEN_MANIFEST)
endif
endif
ifneq ($(FROZEN_DIR),)
$(info Warning: FROZEN_DIR is deprecated in favour of FROZEN_MANIFEST)
$(BUILD)/frozen.c: $(wildcard $(FROZEN_DIR)/*) $(HEADER_BUILD) $(FROZEN_EXTRA_DEPS)
$(ECHO) "GEN $@"
$(Q)$(MAKE_FROZEN) $(FROZEN_DIR) > $@
endif
ifneq ($(FROZEN_MPY_DIR),)
$(info Warning: FROZEN_MPY_DIR is deprecated in favour of FROZEN_MANIFEST)
# make a list of all the .py files that need compiling and freezing
FROZEN_MPY_PY_FILES := $(shell find -L $(FROZEN_MPY_DIR) -type f -name '*.py' | $(SED) -e 's=^$(FROZEN_MPY_DIR)/==')
FROZEN_MPY_MPY_FILES := $(addprefix $(BUILD)/frozen_mpy/,$(FROZEN_MPY_PY_FILES:.py=.mpy))
# to build .mpy files from .py files
$(BUILD)/frozen_mpy/%.mpy: $(FROZEN_MPY_DIR)/%.py | $(MICROPY_MPYCROSS_DEPENDENCY)
@$(ECHO) "MPY $<"
$(Q)$(MKDIR) -p $(dir $@)
$(Q)$(MICROPY_MPYCROSS) -o $@ -s $(<:$(FROZEN_MPY_DIR)/%=%) $(MPY_CROSS_FLAGS) $<
# to build frozen_mpy.c from all .mpy files
$(BUILD)/frozen_mpy.c: $(FROZEN_MPY_MPY_FILES) $(BUILD)/genhdr/qstrdefs.generated.h
@$(ECHO) "GEN $@"
$(Q)$(MPY_TOOL) -f -q $(BUILD)/genhdr/qstrdefs.preprocessed.h $(FROZEN_MPY_MPY_FILES) > $@
endif
ifneq ($(PROG),)
# Build a standalone executable (unix does this)
# The executable should have a .exe extension for builds targeting 'pure'
# Windows, i.e. msvc or mingw builds, but not when using msys or cygwin's gcc.
COMPILER_TARGET := $(shell $(CC) -dumpmachine)
ifneq (,$(findstring mingw,$(COMPILER_TARGET)))
PROG := $(PROG).exe
endif
all: $(PROG)
$(PROG): $(OBJ)
$(ECHO) "LINK $@"
# Do not pass COPT here - it's *C* compiler optimizations. For example,
# we may want to compile using Thumb, but link with non-Thumb libc.
$(Q)$(CC) -o $@ $^ $(LIB) $(LDFLAGS)
ifndef DEBUG
$(Q)$(STRIP) $(STRIPFLAGS_EXTRA) $@
endif
$(Q)$(SIZE) $$(find $(BUILD) -path "$(BUILD)/build/frozen*.o") $@
clean: clean-prog
clean-prog:
$(RM) -f $(PROG)
$(RM) -f $(PROG).map
.PHONY: clean-prog
endif
submodules:
$(ECHO) "Updating submodules: $(GIT_SUBMODULES)"
ifneq ($(GIT_SUBMODULES),)
$(Q)git submodule sync $(addprefix $(TOP)/,$(GIT_SUBMODULES))
$(Q)git submodule update --init $(addprefix $(TOP)/,$(GIT_SUBMODULES))
endif
.PHONY: submodules
LIBMICROPYTHON = libmicropython.a
# We can execute extra commands after library creation using
# LIBMICROPYTHON_EXTRA_CMD. This may be needed e.g. to integrate
# with 3rd-party projects which don't have proper dependency
# tracking. Then LIBMICROPYTHON_EXTRA_CMD can e.g. touch some
# other file to cause the needed effect, e.g. relinking with the new lib.
lib $(LIBMICROPYTHON): $(OBJ)
$(Q)$(AR) rcs $(LIBMICROPYTHON) $^
$(LIBMICROPYTHON_EXTRA_CMD)
clean:
$(RM) -rf $(BUILD) $(CLEAN_EXTRA)
.PHONY: clean
# Clean every non-git file from FROZEN_DIR/FROZEN_MPY_DIR, but make a backup first.
# We run rmdir below to avoid leaving an empty backup dir (it will silently fail if the backup
# is non-empty).
clean-frozen:
if [ -n "$(FROZEN_MPY_DIR)" ]; then \
backup_dir=$(FROZEN_MPY_DIR).$$(date +%Y%m%dT%H%M%S); mkdir $$backup_dir; \
cd $(FROZEN_MPY_DIR); git status --ignored -u all -s . | awk ' {print $$2}' \
| xargs --no-run-if-empty cp --parents -t ../$$backup_dir; \
rmdir ../$$backup_dir 2>/dev/null || true; \
git clean -d -f .; \
fi
if [ -n "$(FROZEN_DIR)" ]; then \
backup_dir=$(FROZEN_DIR).$$(date +%Y%m%dT%H%M%S); mkdir $$backup_dir; \
cd $(FROZEN_DIR); git status --ignored -u all -s . | awk ' {print $$2}' \
| xargs --no-run-if-empty cp --parents -t ../$$backup_dir; \
rmdir ../$$backup_dir 2>/dev/null || true; \
git clean -d -f .; \
fi
.PHONY: clean-frozen
print-cfg:
$(ECHO) "PY_SRC = $(PY_SRC)"
$(ECHO) "BUILD = $(BUILD)"
$(ECHO) "OBJ = $(OBJ)"
.PHONY: print-cfg
print-def:
@$(ECHO) "The following defines are built into the $(CC) compiler"
$(TOUCH) __empty__.c
@$(CC) -E -Wp,-dM __empty__.c
@$(RM) -f __empty__.c
-include $(OBJ:.o=.P)
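The compile_c/compile_cxx recipes post-process the compiler-generated .d file into a .P file that also contains an empty rule for every prerequisite, so a deleted header does not break incremental builds (the auto-dependency trick referenced in the comments). A rough Python rendering of that sed pipeline, assuming a simple single-target .d file:
import re

def make_autodep(dep_text):
    out = [dep_text.rstrip("\n")]             # keep the original rule (cp file.d file.P)
    for line in dep_text.splitlines():
        line = re.sub(r"#.*", "", line)       # strip comments
        line = re.sub(r"^.*: *", "", line)    # drop the "target.o:" prefix
        line = re.sub(r" *\\$", "", line)     # drop trailing line-continuations
        if line.strip():
            out.append(line + " :")           # empty rule for each prerequisite
    return "\n".join(out) + "\n"

dep = "build/py/map.o: py/map.c py/map.h \\\n py/misc.h\n"
print(make_autodep(dep))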

View File

@@ -37,7 +37,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_array_globals, mp_module_array_globals_tab
const mp_obj_module_t mp_module_uarray = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_array_globals,
.globals = (mp_obj_dict_t *)&mp_module_array_globals,
};
MP_REGISTER_MODULE(MP_QSTR_uarray, mp_module_uarray, MICROPY_PY_ARRAY);

View File

@@ -140,7 +140,8 @@ STATIC mp_obj_t mp_builtin_chr(mp_obj_t o_in) {
uint8_t str[4];
int len = 0;
if (c < 0x80) {
*str = c; len = 1;
*str = c;
len = 1;
} else if (c < 0x800) {
str[0] = (c >> 6) | 0xC0;
str[1] = (c & 0x3F) | 0x80;
@@ -157,16 +158,16 @@ STATIC mp_obj_t mp_builtin_chr(mp_obj_t o_in) {
str[3] = (c & 0x3F) | 0x80;
len = 4;
} else {
mp_raise_ValueError("chr() arg not in range(0x110000)");
mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(0x110000)"));
}
return mp_obj_new_str_via_qstr((char*)str, len);
return mp_obj_new_str_via_qstr((char *)str, len);
#else
mp_int_t ord = mp_obj_get_int(o_in);
if (0 <= ord && ord <= 0xff) {
uint8_t str[1] = {ord};
return mp_obj_new_str_via_qstr((char*)str, 1);
return mp_obj_new_str_via_qstr((char *)str, 1);
} else {
mp_raise_ValueError("chr() arg not in range(256)");
mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(256)"));
}
#endif
}
@@ -229,7 +230,7 @@ MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hex_obj, mp_builtin_hex);
#if MICROPY_PY_BUILTINS_INPUT
#include "py/mphal.h"
#include "lib/mp-readline/readline.h"
#include "shared/readline/readline.h"
// A port can define mp_hal_readline if they want to use a custom function here
#ifndef mp_hal_readline
@@ -244,10 +245,10 @@ STATIC mp_obj_t mp_builtin_input(size_t n_args, const mp_obj_t *args) {
vstr_init(&line, 16);
int ret = mp_hal_readline(&line, "");
if (ret == CHAR_CTRL_C) {
nlr_raise(mp_obj_new_exception(&mp_type_KeyboardInterrupt));
mp_raise_type(&mp_type_KeyboardInterrupt);
}
if (line.len == 0 && ret == CHAR_CTRL_D) {
nlr_raise(mp_obj_new_exception(&mp_type_EOFError));
mp_raise_type(&mp_type_EOFError);
}
return mp_obj_new_str_from_vstr(&mp_type_str, &line);
}
@@ -285,7 +286,7 @@ STATIC mp_obj_t mp_builtin_min_max(size_t n_args, const mp_obj_t *args, mp_map_t
if (default_elem != NULL) {
best_obj = default_elem->value;
} else {
mp_raise_ValueError("arg is an empty sequence");
mp_raise_ValueError(MP_ERROR_TEXT("arg is an empty sequence"));
}
}
return best_obj;
@@ -321,7 +322,7 @@ STATIC mp_obj_t mp_builtin_next(size_t n_args, const mp_obj_t *args) {
if (n_args == 1) {
mp_obj_t ret = mp_iternext_allow_raise(args[0]);
if (ret == MP_OBJ_STOP_ITERATION) {
nlr_raise(mp_obj_new_exception(&mp_type_StopIteration));
mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
} else {
return ret;
}
@@ -335,7 +336,7 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj, 1, 2, mp_builtin_next);
STATIC mp_obj_t mp_builtin_next(mp_obj_t o) {
mp_obj_t ret = mp_iternext_allow_raise(o);
if (ret == MP_OBJ_STOP_ITERATION) {
nlr_raise(mp_obj_new_exception(&mp_type_StopIteration));
mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
} else {
return ret;
}
@@ -355,7 +356,7 @@ MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_oct_obj, mp_builtin_oct);
STATIC mp_obj_t mp_builtin_ord(mp_obj_t o_in) {
size_t len;
const byte *str = (const byte*)mp_obj_str_get_data(o_in, &len);
const byte *str = (const byte *)mp_obj_str_get_data(o_in, &len);
#if MICROPY_PY_BUILTINS_STR_UNICODE
if (mp_obj_is_str(o_in)) {
len = utf8_charlen(str, len);
@@ -371,26 +372,27 @@ STATIC mp_obj_t mp_builtin_ord(mp_obj_t o_in) {
}
}
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("ord expects a character");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"ord() expected a character, but string of length %d found", (int)len));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("ord expects a character"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("ord() expected a character, but string of length %d found"), (int)len);
#endif
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_ord_obj, mp_builtin_ord);
STATIC mp_obj_t mp_builtin_pow(size_t n_args, const mp_obj_t *args) {
switch (n_args) {
case 2: return mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]);
case 2:
return mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]);
default:
#if !MICROPY_PY_BUILTINS_POW3
mp_raise_msg(&mp_type_NotImplementedError, "3-arg pow() not supported");
#elif MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_MPZ
#if !MICROPY_PY_BUILTINS_POW3
mp_raise_NotImplementedError(MP_ERROR_TEXT("3-arg pow() not supported"));
#elif MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_MPZ
return mp_binary_op(MP_BINARY_OP_MODULO, mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]), args[2]);
#else
#else
return mp_obj_int_pow3(args[0], args[1], args[2]);
#endif
#endif
}
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj, 2, 3, mp_builtin_pow);
@@ -485,7 +487,7 @@ STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) {
}
mp_obj_t mult = mp_binary_op(MP_BINARY_OP_POWER, MP_OBJ_NEW_SMALL_INT(10), MP_OBJ_NEW_SMALL_INT(-num_dig));
mp_obj_t half_mult = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, mult, MP_OBJ_NEW_SMALL_INT(2));
mp_obj_t half_mult = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, mult, MP_OBJ_NEW_SMALL_INT(2));
mp_obj_t modulo = mp_binary_op(MP_BINARY_OP_MODULO, o_in, mult);
mp_obj_t rounded = mp_binary_op(MP_BINARY_OP_SUBTRACT, o_in, modulo);
if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, half_mult, modulo))) {
@@ -503,29 +505,33 @@ STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) {
}
#endif
}
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
mp_float_t val = mp_obj_get_float(o_in);
if (n_args > 1) {
mp_int_t num_dig = mp_obj_get_int(args[1]);
mp_float_t mult = MICROPY_FLOAT_C_FUN(pow)(10, num_dig);
mp_float_t mult = MICROPY_FLOAT_C_FUN(pow)(10, (mp_float_t)num_dig);
// TODO may lead to overflow
mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val * mult) / mult;
return mp_obj_new_float(rounded);
}
mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val);
return mp_obj_new_int_from_float(rounded);
#else
#else
mp_int_t r = mp_obj_get_int(o_in);
return mp_obj_new_int(r);
#endif
#endif
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj, 1, 2, mp_builtin_round);
STATIC mp_obj_t mp_builtin_sum(size_t n_args, const mp_obj_t *args) {
mp_obj_t value;
switch (n_args) {
case 1: value = MP_OBJ_NEW_SMALL_INT(0); break;
default: value = args[1]; break;
case 1:
value = MP_OBJ_NEW_SMALL_INT(0);
break;
default:
value = args[1];
break;
}
mp_obj_iter_buf_t iter_buf;
mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
@@ -539,7 +545,7 @@ MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj, 1, 2, mp_builtin_sum);
STATIC mp_obj_t mp_builtin_sorted(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
if (n_args > 1) {
mp_raise_TypeError("must use keyword argument for key function");
mp_raise_TypeError(MP_ERROR_TEXT("must use keyword argument for key function"));
}
mp_obj_t self = mp_type_list.make_new(&mp_type_list, 1, 0, args);
mp_obj_list_sort(1, &self, kwargs);
@@ -552,7 +558,11 @@ MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj, 1, mp_builtin_sorted);
static inline mp_obj_t mp_load_attr_default(mp_obj_t base, qstr attr, mp_obj_t defval) {
mp_obj_t dest[2];
// use load_method, raising or not raising exception
((defval == MP_OBJ_NULL) ? mp_load_method : mp_load_method_maybe)(base, attr, dest);
if (defval == MP_OBJ_NULL) {
mp_load_method(base, attr, dest);
} else {
mp_load_method_protected(base, attr, dest, false);
}
if (dest[0] == MP_OBJ_NULL) {
return defval;
} else if (dest[1] == MP_OBJ_NULL) {
@@ -762,8 +772,6 @@ STATIC const mp_rom_map_elem_t mp_module_builtins_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR_ViperTypeError), MP_ROM_PTR(&mp_type_ViperTypeError) },
#endif
{ MP_ROM_QSTR(MP_QSTR_ZeroDivisionError), MP_ROM_PTR(&mp_type_ZeroDivisionError) },
// Somehow CPython managed to have OverflowError not inherit from ValueError ;-/
// TODO: For MICROPY_CPYTHON_COMPAT==0 use ValueError to avoid exc proliferation
// Extra builtins as defined by a port
MICROPY_PORT_BUILTINS
@@ -773,5 +781,5 @@ MP_DEFINE_CONST_DICT(mp_module_builtins_globals, mp_module_builtins_globals_tabl
const mp_obj_module_t mp_module_builtins = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_builtins_globals,
.globals = (mp_obj_dict_t *)&mp_module_builtins_globals,
};
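The chr() branches above are a hand-rolled UTF-8 encoder for a single code point (1 byte below 0x80, 2 below 0x800, 3 below 0x10000, otherwise 4). The 3- and 4-byte cases fall partly outside the hunk shown, so the sketch below reconstructs them from the standard UTF-8 layout and checks the result against Python's own encoder:
def utf8_encode_char(c):
    if c < 0x80:
        return bytes([c])
    if c < 0x800:
        return bytes([(c >> 6) | 0xC0, (c & 0x3F) | 0x80])
    if c < 0x10000:
        return bytes([(c >> 12) | 0xE0, ((c >> 6) & 0x3F) | 0x80, (c & 0x3F) | 0x80])
    if c < 0x110000:
        return bytes([(c >> 18) | 0xF0, ((c >> 12) & 0x3F) | 0x80,
                      ((c >> 6) & 0x3F) | 0x80, (c & 0x3F) | 0x80])
    raise ValueError("chr() arg not in range(0x110000)")

for cp in (0x41, 0xE9, 0x20AC, 0x1F600):
    assert utf8_encode_char(cp) == chr(cp).encode("utf-8")
print("ok")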

View File

@@ -43,7 +43,7 @@ STATIC mp_obj_t mp_cmath_polar(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
mp_obj_t tuple[2] = {
mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real*real + imag*imag)),
mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real * real + imag * imag)),
mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real)),
};
return mp_obj_new_tuple(2, tuple);
@@ -72,7 +72,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_exp_obj, mp_cmath_exp);
STATIC mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
return mp_obj_new_complex(0.5 * MICROPY_FLOAT_C_FUN(log)(real*real + imag*imag), MICROPY_FLOAT_C_FUN(atan2)(imag, real));
return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log)(real * real + imag * imag), MICROPY_FLOAT_C_FUN(atan2)(imag, real));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
@@ -81,7 +81,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
STATIC mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
return mp_obj_new_complex(0.5 * MICROPY_FLOAT_C_FUN(log10)(real*real + imag*imag), 0.4342944819032518 * MICROPY_FLOAT_C_FUN(atan2)(imag, real));
return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log10)(real * real + imag * imag), MICROPY_FLOAT_CONST(0.4342944819032518) * MICROPY_FLOAT_C_FUN(atan2)(imag, real));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
#endif
@@ -90,8 +90,8 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
STATIC mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
mp_float_t real, imag;
mp_obj_get_complex(z_obj, &real, &imag);
mp_float_t sqrt_abs = MICROPY_FLOAT_C_FUN(pow)(real*real + imag*imag, 0.25);
mp_float_t theta = 0.5 * MICROPY_FLOAT_C_FUN(atan2)(imag, real);
mp_float_t sqrt_abs = MICROPY_FLOAT_C_FUN(pow)(real * real + imag * imag, MICROPY_FLOAT_CONST(0.25));
mp_float_t theta = MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(atan2)(imag, real);
return mp_obj_new_complex(sqrt_abs * MICROPY_FLOAT_C_FUN(cos)(theta), sqrt_abs * MICROPY_FLOAT_C_FUN(sin)(theta));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sqrt_obj, mp_cmath_sqrt);
@@ -125,28 +125,28 @@ STATIC const mp_rom_map_elem_t mp_module_cmath_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_cmath_log10_obj) },
#endif
{ MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_cmath_sqrt_obj) },
//{ MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_cmath_acos_obj) },
//{ MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_cmath_asin_obj) },
//{ MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_cmath_atan_obj) },
// { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_cmath_acos_obj) },
// { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_cmath_asin_obj) },
// { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_cmath_atan_obj) },
{ MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_cmath_cos_obj) },
{ MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_cmath_sin_obj) },
//{ MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_cmath_tan_obj) },
//{ MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_cmath_acosh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_cmath_asinh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_cmath_atanh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_cmath_cosh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_cmath_sinh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_cmath_tanh_obj) },
//{ MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_cmath_isfinite_obj) },
//{ MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_cmath_isinf_obj) },
//{ MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_cmath_isnan_obj) },
// { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_cmath_tan_obj) },
// { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_cmath_acosh_obj) },
// { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_cmath_asinh_obj) },
// { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_cmath_atanh_obj) },
// { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_cmath_cosh_obj) },
// { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_cmath_sinh_obj) },
// { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_cmath_tanh_obj) },
// { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_cmath_isfinite_obj) },
// { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_cmath_isinf_obj) },
// { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_cmath_isnan_obj) },
};
STATIC MP_DEFINE_CONST_DICT(mp_module_cmath_globals, mp_module_cmath_globals_table);
const mp_obj_module_t mp_module_cmath = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_cmath_globals,
.globals = (mp_obj_dict_t *)&mp_module_cmath_globals,
};
#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_CMATH
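The sqrt implementation above uses |z|^(1/2) = (re^2 + im^2)^(1/4) together with the half angle from atan2. The same formula in plain Python, checked against the standard library (a sketch of the math only, not the MicroPython object code):
import cmath
import math

def complex_sqrt(real, imag):
    sqrt_abs = (real * real + imag * imag) ** 0.25   # sqrt of the magnitude
    theta = 0.5 * math.atan2(imag, real)             # half the argument
    return complex(sqrt_abs * math.cos(theta), sqrt_abs * math.sin(theta))

z = 3 + 4j
print(complex_sqrt(z.real, z.imag))  # -> (2+1j), up to rounding
print(cmath.sqrt(z))                 # -> (2+1j)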

View File

@@ -43,7 +43,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_collections_globals, mp_module_collections
const mp_obj_module_t mp_module_collections = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_collections_globals,
.globals = (mp_obj_dict_t *)&mp_module_collections_globals,
};
#endif // MICROPY_PY_COLLECTIONS

View File

@@ -33,11 +33,11 @@
// collect(): run a garbage collection
STATIC mp_obj_t py_gc_collect(void) {
gc_collect();
#if MICROPY_PY_GC_COLLECT_RETVAL
#if MICROPY_PY_GC_COLLECT_RETVAL
return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_collected));
#else
#else
return mp_const_none;
#endif
#endif
}
MP_DEFINE_CONST_FUN_OBJ_0(gc_collect_obj, py_gc_collect);
@@ -112,7 +112,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
const mp_obj_module_t mp_module_gc = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_gc_globals,
.globals = (mp_obj_dict_t *)&mp_module_gc_globals,
};
#endif

View File

@@ -59,12 +59,17 @@ STATIC mp_uint_t iobase_read_write(mp_obj_t obj, void *buf, mp_uint_t size, int
mp_load_method(obj, qst, dest);
mp_obj_array_t ar = {{&mp_type_bytearray}, BYTEARRAY_TYPECODE, 0, size, buf};
dest[2] = MP_OBJ_FROM_PTR(&ar);
mp_obj_t ret = mp_call_method_n_kw(1, 0, dest);
if (ret == mp_const_none) {
mp_obj_t ret_obj = mp_call_method_n_kw(1, 0, dest);
if (ret_obj == mp_const_none) {
*errcode = MP_EAGAIN;
return MP_STREAM_ERROR;
}
mp_int_t ret = mp_obj_get_int(ret_obj);
if (ret >= 0) {
return ret;
} else {
return mp_obj_get_int(ret);
*errcode = -ret;
return MP_STREAM_ERROR;
}
}
STATIC mp_uint_t iobase_read(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode) {
@@ -72,7 +77,7 @@ STATIC mp_uint_t iobase_read(mp_obj_t obj, void *buf, mp_uint_t size, int *errco
}
STATIC mp_uint_t iobase_write(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode) {
return iobase_read_write(obj, (void*)buf, size, errcode, MP_QSTR_write);
return iobase_read_write(obj, (void *)buf, size, errcode, MP_QSTR_write);
}
STATIC mp_uint_t iobase_ioctl(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode) {
@@ -144,7 +149,7 @@ STATIC mp_uint_t bufwriter_write(mp_obj_t self_in, const void *buf, mp_uint_t si
// is word-aligned, to guard against obscure cases when it matters, e.g.
// https://github.com/micropython/micropython/issues/1863
memcpy(self->buf + self->len, buf, rem);
buf = (byte*)buf + rem;
buf = (byte *)buf + rem;
size -= rem;
mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->alloc, errcode);
(void)out_sz;
@@ -190,12 +195,12 @@ STATIC const mp_stream_p_t bufwriter_stream_p = {
.write = bufwriter_write,
};
STATIC const mp_obj_type_t bufwriter_type = {
STATIC const mp_obj_type_t mp_type_bufwriter = {
{ &mp_type_type },
.name = MP_QSTR_BufferedWriter,
.make_new = bufwriter_make_new,
.protocol = &bufwriter_stream_p,
.locals_dict = (mp_obj_dict_t*)&bufwriter_locals_dict,
.locals_dict = (mp_obj_dict_t *)&bufwriter_locals_dict,
};
#endif // MICROPY_PY_IO_BUFFEREDWRITER
@@ -231,14 +236,14 @@ STATIC mp_obj_t resource_stream(mp_obj_t package_in, mp_obj_t path_in) {
mp_obj_stringio_t *o = m_new_obj(mp_obj_stringio_t);
o->base.type = &mp_type_bytesio;
o->vstr = m_new_obj(vstr_t);
vstr_init_fixed_buf(o->vstr, len + 1, (char*)data);
vstr_init_fixed_buf(o->vstr, len + 1, (char *)data);
o->vstr->len = len;
o->pos = 0;
return MP_OBJ_FROM_PTR(o);
}
mp_obj_t path_out = mp_obj_new_str(path_buf.buf, path_buf.len);
return mp_builtin_open(1, &path_out, (mp_map_t*)&mp_const_empty_map);
return mp_builtin_open(1, &path_out, (mp_map_t *)&mp_const_empty_map);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(resource_stream_obj, resource_stream);
#endif
@@ -265,7 +270,7 @@ STATIC const mp_rom_map_elem_t mp_module_io_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR_BytesIO), MP_ROM_PTR(&mp_type_bytesio) },
#endif
#if MICROPY_PY_IO_BUFFEREDWRITER
{ MP_ROM_QSTR(MP_QSTR_BufferedWriter), MP_ROM_PTR(&bufwriter_type) },
{ MP_ROM_QSTR(MP_QSTR_BufferedWriter), MP_ROM_PTR(&mp_type_bufwriter) },
#endif
};
@@ -273,7 +278,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_io_globals, mp_module_io_globals_table);
const mp_obj_module_t mp_module_io = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_io_globals,
.globals = (mp_obj_dict_t *)&mp_module_io_globals,
};
#endif
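The change above tightens how a Python-level readinto/write return value is mapped to the C stream protocol: None becomes EAGAIN, a non-negative int is the byte count, and a negative int is now interpreted as a negative errno instead of being returned as-is. A sketch of just that mapping; the errno numbers are conventional Linux values and only illustrative:
MP_EAGAIN = 11          # assumption: typical errno value for EAGAIN
MP_STREAM_ERROR = -1    # sentinel used by the C stream protocol

def map_iobase_result(ret_obj):
    # Mirrors iobase_read_write above.
    if ret_obj is None:
        return MP_STREAM_ERROR, MP_EAGAIN   # no data yet (non-blocking)
    ret = int(ret_obj)
    if ret >= 0:
        return ret, 0                       # number of bytes transferred
    return MP_STREAM_ERROR, -ret            # negative errno reported to the caller

print(map_iobase_result(None))   # -> (-1, 11)
print(map_iobase_result(16))     # -> (16, 0)
print(map_iobase_result(-110))   # -> (-1, 110), e.g. ETIMEDOUT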

View File

@@ -34,9 +34,11 @@
// M_PI is not part of the math.h standard and may not be defined
// And by defining our own we can ensure it uses the correct const format.
#define MP_PI MICROPY_FLOAT_CONST(3.14159265358979323846)
#define MP_PI_4 MICROPY_FLOAT_CONST(0.78539816339744830962)
#define MP_3_PI_4 MICROPY_FLOAT_CONST(2.35619449019234492885)
STATIC NORETURN void math_error(void) {
mp_raise_ValueError("math domain error");
mp_raise_ValueError(MP_ERROR_TEXT("math domain error"));
}
STATIC mp_obj_t math_generic_1(mp_obj_t x_obj, mp_float_t (*f)(mp_float_t)) {
@@ -59,30 +61,30 @@ STATIC mp_obj_t math_generic_2(mp_obj_t x_obj, mp_obj_t y_obj, mp_float_t (*f)(m
}
#define MATH_FUN_1(py_name, c_name) \
STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { \
STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { \
return math_generic_1(x_obj, MICROPY_FLOAT_C_FUN(c_name)); \
} \
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
#define MATH_FUN_1_TO_BOOL(py_name, c_name) \
STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { return mp_obj_new_bool(c_name(mp_obj_get_float(x_obj))); } \
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_bool(c_name(mp_obj_get_float(x_obj))); } \
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
#define MATH_FUN_1_TO_INT(py_name, c_name) \
STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj) { return mp_obj_new_int_from_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj))); } \
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_## py_name ## _obj, mp_math_ ## py_name);
STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_int_from_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj))); } \
STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
#define MATH_FUN_2(py_name, c_name) \
STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
return math_generic_2(x_obj, y_obj, MICROPY_FLOAT_C_FUN(c_name)); \
} \
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_## py_name ## _obj, mp_math_ ## py_name);
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
#define MATH_FUN_2_FLT_INT(py_name, c_name) \
STATIC mp_obj_t mp_math_ ## py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj), mp_obj_get_int(y_obj))); \
} \
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_## py_name ## _obj, mp_math_ ## py_name);
STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
#if MP_NEED_LOG2
#undef log2
@@ -96,7 +98,19 @@ mp_float_t MICROPY_FLOAT_C_FUN(log2)(mp_float_t x) {
// sqrt(x): returns the square root of x
MATH_FUN_1(sqrt, sqrt)
// pow(x, y): returns x to the power of y
#if MICROPY_PY_MATH_POW_FIX_NAN
mp_float_t pow_func(mp_float_t x, mp_float_t y) {
// pow(base, 0) returns 1 for any base, even when base is NaN
// pow(+1, exponent) returns 1 for any exponent, even when exponent is NaN
if (x == MICROPY_FLOAT_CONST(1.0) || y == MICROPY_FLOAT_CONST(0.0)) {
return MICROPY_FLOAT_CONST(1.0);
}
return MICROPY_FLOAT_C_FUN(pow)(x, y);
}
MATH_FUN_2(pow, pow_func)
#else
MATH_FUN_2(pow, pow)
#endif
// exp(x)
MATH_FUN_1(exp, exp)
#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
@@ -132,7 +146,17 @@ MATH_FUN_1(asin, asin)
// atan(x)
MATH_FUN_1(atan, atan)
// atan2(y, x)
#if MICROPY_PY_MATH_ATAN2_FIX_INFNAN
mp_float_t atan2_func(mp_float_t x, mp_float_t y) {
if (isinf(x) && isinf(y)) {
return copysign(y < 0 ? MP_3_PI_4 : MP_PI_4, x);
}
return atan2(x, y);
}
MATH_FUN_2(atan2, atan2_func)
#else
MATH_FUN_2(atan2, atan2)
#endif
// ceil(x)
MATH_FUN_1_TO_INT(ceil, ceil)
// copysign(x, y)
@@ -146,9 +170,16 @@ STATIC mp_float_t MICROPY_FLOAT_C_FUN(fabs_func)(mp_float_t x) {
}
MATH_FUN_1(fabs, fabs_func)
// floor(x)
MATH_FUN_1_TO_INT(floor, floor) //TODO: delegate to x.__floor__() if x is not a float
MATH_FUN_1_TO_INT(floor, floor) // TODO: delegate to x.__floor__() if x is not a float
// fmod(x, y)
#if MICROPY_PY_MATH_FMOD_FIX_INFNAN
mp_float_t fmod_func(mp_float_t x, mp_float_t y) {
return (!isinf(x) && isinf(y)) ? x : fmod(x, y);
}
MATH_FUN_2(fmod, fmod_func)
#else
MATH_FUN_2(fmod, fmod)
#endif
// isfinite(x)
MATH_FUN_1_TO_BOOL(isfinite, isfinite)
// isinf(x)
@@ -169,21 +200,19 @@ MATH_FUN_1(gamma, tgamma)
// lgamma(x): return the natural logarithm of the gamma function of x
MATH_FUN_1(lgamma, lgamma)
#endif
//TODO: fsum
// TODO: fsum
#if MICROPY_PY_MATH_ISCLOSE
STATIC mp_obj_t mp_math_isclose(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
enum { ARG_a, ARG_b, ARG_rel_tol, ARG_abs_tol };
enum { ARG_rel_tol, ARG_abs_tol };
static const mp_arg_t allowed_args[] = {
{MP_QSTR_, MP_ARG_REQUIRED | MP_ARG_OBJ},
{MP_QSTR_, MP_ARG_REQUIRED | MP_ARG_OBJ},
{MP_QSTR_rel_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL}},
{MP_QSTR_abs_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NEW_SMALL_INT(0)}},
};
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
const mp_float_t a = mp_obj_get_float(args[ARG_a].u_obj);
const mp_float_t b = mp_obj_get_float(args[ARG_b].u_obj);
mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
const mp_float_t a = mp_obj_get_float(pos_args[0]);
const mp_float_t b = mp_obj_get_float(pos_args[1]);
const mp_float_t rel_tol = args[ARG_rel_tol].u_obj == MP_OBJ_NULL
? (mp_float_t)1e-9 : mp_obj_get_float(args[ARG_rel_tol].u_obj);
const mp_float_t abs_tol = mp_obj_get_float(args[ARG_abs_tol].u_obj);
@@ -223,7 +252,7 @@ STATIC mp_obj_t mp_math_log(size_t n_args, const mp_obj_t *args) {
if (base <= (mp_float_t)0.0) {
math_error();
} else if (base == (mp_float_t)1.0) {
mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero");
mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
}
return mp_obj_new_float(l / MICROPY_FLOAT_C_FUN(log)(base));
}
@@ -246,7 +275,13 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_frexp_obj, mp_math_frexp);
// modf(x)
STATIC mp_obj_t mp_math_modf(mp_obj_t x_obj) {
mp_float_t int_part = 0.0;
mp_float_t fractional_part = MICROPY_FLOAT_C_FUN(modf)(mp_obj_get_float(x_obj), &int_part);
mp_float_t x = mp_obj_get_float(x_obj);
mp_float_t fractional_part = MICROPY_FLOAT_C_FUN(modf)(x, &int_part);
#if MICROPY_PY_MATH_MODF_FIX_NEGZERO
if (fractional_part == MICROPY_FLOAT_CONST(0.0)) {
fractional_part = copysign(fractional_part, x);
}
#endif
mp_obj_t tuple[2];
tuple[0] = mp_obj_new_float(fractional_part);
tuple[1] = mp_obj_new_float(int_part);
@@ -294,7 +329,7 @@ STATIC mp_obj_t mp_math_factorial_inner(mp_uint_t start, mp_uint_t end) {
STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
mp_int_t max = mp_obj_get_int(x_obj);
if (max < 0) {
mp_raise_msg(&mp_type_ValueError, "negative factorial");
mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
} else if (max == 0) {
return MP_OBJ_NEW_SMALL_INT(1);
}
@@ -308,7 +343,7 @@ STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
mp_int_t max = mp_obj_get_int(x_obj);
if (max < 0) {
mp_raise_msg(&mp_type_ValueError, "negative factorial");
mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
} else if (max <= 1) {
return MP_OBJ_NEW_SMALL_INT(1);
}
@@ -392,7 +427,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_math_globals, mp_module_math_globals_table
const mp_obj_module_t mp_module_math = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_math_globals,
.globals = (mp_obj_dict_t *)&mp_module_math_globals,
};
#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH

View File

@@ -68,25 +68,25 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_peak_obj, mp_micropython_mem
mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args) {
(void)args;
#if MICROPY_MEM_STATS
#if MICROPY_MEM_STATS
mp_printf(&mp_plat_print, "mem: total=" UINT_FMT ", current=" UINT_FMT ", peak=" UINT_FMT "\n",
(mp_uint_t)m_get_total_bytes_allocated(), (mp_uint_t)m_get_current_bytes_allocated(), (mp_uint_t)m_get_peak_bytes_allocated());
#endif
#if MICROPY_STACK_CHECK
#endif
#if MICROPY_STACK_CHECK
mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " UINT_FMT "\n",
mp_stack_usage(), (mp_uint_t)MP_STATE_THREAD(stack_limit));
#else
#else
mp_printf(&mp_plat_print, "stack: " UINT_FMT "\n", mp_stack_usage());
#endif
#if MICROPY_ENABLE_GC
#endif
#if MICROPY_ENABLE_GC
gc_dump_info();
if (n_args == 1) {
// arg given means dump gc allocation table
gc_dump_alloc_table();
}
#else
#else
(void)n_args;
#endif
#endif
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_mem_info_obj, 0, 1, mp_micropython_mem_info);
@@ -130,9 +130,16 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_lock_obj, mp_micropython_he
STATIC mp_obj_t mp_micropython_heap_unlock(void) {
gc_unlock();
return mp_const_none;
return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_unlock_obj, mp_micropython_heap_unlock);
#if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
STATIC mp_obj_t mp_micropython_heap_locked(void) {
return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_locked_obj, mp_micropython_heap_locked);
#endif
#endif
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
@@ -150,7 +157,7 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_micropython_kbd_intr_obj, mp_micropython_kbd
#if MICROPY_ENABLE_SCHEDULER
STATIC mp_obj_t mp_micropython_schedule(mp_obj_t function, mp_obj_t arg) {
if (!mp_sched_schedule(function, arg)) {
mp_raise_msg(&mp_type_RuntimeError, "schedule queue full");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("schedule queue full"));
}
return mp_const_none;
}
@@ -163,27 +170,30 @@ STATIC const mp_rom_map_elem_t mp_module_micropython_globals_table[] = {
#if MICROPY_ENABLE_COMPILER
{ MP_ROM_QSTR(MP_QSTR_opt_level), MP_ROM_PTR(&mp_micropython_opt_level_obj) },
#endif
#if MICROPY_PY_MICROPYTHON_MEM_INFO
#if MICROPY_MEM_STATS
#if MICROPY_PY_MICROPYTHON_MEM_INFO
#if MICROPY_MEM_STATS
{ MP_ROM_QSTR(MP_QSTR_mem_total), MP_ROM_PTR(&mp_micropython_mem_total_obj) },
{ MP_ROM_QSTR(MP_QSTR_mem_current), MP_ROM_PTR(&mp_micropython_mem_current_obj) },
{ MP_ROM_QSTR(MP_QSTR_mem_peak), MP_ROM_PTR(&mp_micropython_mem_peak_obj) },
#endif
#endif
{ MP_ROM_QSTR(MP_QSTR_mem_info), MP_ROM_PTR(&mp_micropython_mem_info_obj) },
{ MP_ROM_QSTR(MP_QSTR_qstr_info), MP_ROM_PTR(&mp_micropython_qstr_info_obj) },
#endif
#endif
#if MICROPY_PY_MICROPYTHON_STACK_USE
{ MP_ROM_QSTR(MP_QSTR_stack_use), MP_ROM_PTR(&mp_micropython_stack_use_obj) },
#endif
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
{ MP_ROM_QSTR(MP_QSTR_alloc_emergency_exception_buf), MP_ROM_PTR(&mp_alloc_emergency_exception_buf_obj) },
#endif
#endif
#if MICROPY_ENABLE_PYSTACK
{ MP_ROM_QSTR(MP_QSTR_pystack_use), MP_ROM_PTR(&mp_micropython_pystack_use_obj) },
#endif
#if MICROPY_ENABLE_GC
{ MP_ROM_QSTR(MP_QSTR_heap_lock), MP_ROM_PTR(&mp_micropython_heap_lock_obj) },
{ MP_ROM_QSTR(MP_QSTR_heap_unlock), MP_ROM_PTR(&mp_micropython_heap_unlock_obj) },
#if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
{ MP_ROM_QSTR(MP_QSTR_heap_locked), MP_ROM_PTR(&mp_micropython_heap_locked_obj) },
#endif
#endif
#if MICROPY_KBD_EXCEPTION
{ MP_ROM_QSTR(MP_QSTR_kbd_intr), MP_ROM_PTR(&mp_micropython_kbd_intr_obj) },
@@ -197,5 +207,5 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_micropython_globals, mp_module_micropython
const mp_obj_module_t mp_module_micropython = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_micropython_globals,
.globals = (mp_obj_dict_t *)&mp_module_micropython_globals,
};

View File

@@ -141,7 +141,7 @@ STATIC mp_obj_t struct_unpack_from(size_t n_args, const mp_obj_t *args) {
// negative offsets are relative to the end of the buffer
offset = bufinfo.len + offset;
if (offset < 0) {
mp_raise_ValueError("buffer too small");
mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
}
}
p += offset;
@@ -150,7 +150,7 @@ STATIC mp_obj_t struct_unpack_from(size_t n_args, const mp_obj_t *args) {
// Check that the input buffer is big enough to unpack all the values
if (p + total_sz > end_p) {
mp_raise_ValueError("buffer too small");
mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
}
for (size_t i = 0; i < num_items;) {
@@ -217,7 +217,7 @@ STATIC mp_obj_t struct_pack(size_t n_args, const mp_obj_t *args) {
mp_int_t size = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
vstr_t vstr;
vstr_init_len(&vstr, size);
byte *p = (byte*)vstr.buf;
byte *p = (byte *)vstr.buf;
memset(p, 0, size);
struct_pack_into_internal(args[0], p, n_args - 1, &args[1]);
return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
@@ -232,7 +232,7 @@ STATIC mp_obj_t struct_pack_into(size_t n_args, const mp_obj_t *args) {
// negative offsets are relative to the end of the buffer
offset = (mp_int_t)bufinfo.len + offset;
if (offset < 0) {
mp_raise_ValueError("buffer too small");
mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
}
}
byte *p = (byte *)bufinfo.buf;
@@ -242,7 +242,7 @@ STATIC mp_obj_t struct_pack_into(size_t n_args, const mp_obj_t *args) {
// Check that the output buffer is big enough to hold all the values
mp_int_t sz = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
if (p + sz > end_p) {
mp_raise_ValueError("buffer too small");
mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
}
struct_pack_into_internal(args[0], p, n_args - 3, &args[3]);
@@ -263,7 +263,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_struct_globals, mp_module_struct_globals_t
const mp_obj_module_t mp_module_ustruct = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_struct_globals,
.globals = (mp_obj_dict_t *)&mp_module_struct_globals,
};
#endif

View File

@@ -53,7 +53,7 @@ const mp_print_t mp_sys_stdout_print = {&mp_sys_stdout_obj, mp_stream_write_adap
#endif
// version - Python language version that this implementation conforms to, as a string
STATIC const MP_DEFINE_STR_OBJ(version_obj, "3.4.0");
STATIC const MP_DEFINE_STR_OBJ(mp_sys_version_obj, "3.4.0");
// version_info - Python language version that this implementation conforms to, as a tuple of ints
#define I(n) MP_OBJ_NEW_SMALL_INT(n)
@@ -89,8 +89,8 @@ STATIC MP_DEFINE_ATTRTUPLE(
mp_sys_implementation_obj,
impl_fields,
2 + MICROPY_PERSISTENT_CODE_LOAD,
SYS_IMPLEMENTATION_ELEMS
);
SYS_IMPLEMENTATION_ELEMS
);
#else
STATIC const mp_rom_obj_tuple_t mp_sys_implementation_obj = {
{&mp_type_tuple},
@@ -105,18 +105,16 @@ STATIC const mp_rom_obj_tuple_t mp_sys_implementation_obj = {
#ifdef MICROPY_PY_SYS_PLATFORM
// platform - the platform that MicroPython is running on
STATIC const MP_DEFINE_STR_OBJ(platform_obj, MICROPY_PY_SYS_PLATFORM);
STATIC const MP_DEFINE_STR_OBJ(mp_sys_platform_obj, MICROPY_PY_SYS_PLATFORM);
#endif
// exit([retval]): raise SystemExit, with optional argument given to the exception
STATIC mp_obj_t mp_sys_exit(size_t n_args, const mp_obj_t *args) {
mp_obj_t exc;
if (n_args == 0) {
exc = mp_obj_new_exception(&mp_type_SystemExit);
mp_raise_type(&mp_type_SystemExit);
} else {
exc = mp_obj_new_exception_arg1(&mp_type_SystemExit, args[0]);
mp_raise_type_arg(&mp_type_SystemExit, args[0]);
}
nlr_raise(exc);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_exit_obj, 0, 1, mp_sys_exit);
@@ -187,13 +185,13 @@ MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_settrace_obj, mp_sys_settrace);
STATIC const mp_rom_map_elem_t mp_module_sys_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_sys) },
//{ MP_ROM_QSTR(MP_QSTR_path), MP_ROM_PTR(&MP_STATE_VM(mp_sys_path_obj)) },
//{ MP_ROM_QSTR(MP_QSTR_argv), MP_ROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)) },
{ MP_ROM_QSTR(MP_QSTR_version), MP_ROM_PTR(&version_obj) },
{ MP_ROM_QSTR(MP_QSTR_path), MP_ROM_PTR(&MP_STATE_VM(mp_sys_path_obj)) },
{ MP_ROM_QSTR(MP_QSTR_argv), MP_ROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)) },
{ MP_ROM_QSTR(MP_QSTR_version), MP_ROM_PTR(&mp_sys_version_obj) },
{ MP_ROM_QSTR(MP_QSTR_version_info), MP_ROM_PTR(&mp_sys_version_info_obj) },
{ MP_ROM_QSTR(MP_QSTR_implementation), MP_ROM_PTR(&mp_sys_implementation_obj) },
#ifdef MICROPY_PY_SYS_PLATFORM
{ MP_ROM_QSTR(MP_QSTR_platform), MP_ROM_PTR(&platform_obj) },
{ MP_ROM_QSTR(MP_QSTR_platform), MP_ROM_PTR(&mp_sys_platform_obj) },
#endif
#if MP_ENDIANNESS_LITTLE
{ MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_little) },
@@ -210,7 +208,7 @@ STATIC const mp_rom_map_elem_t mp_module_sys_globals_table[] = {
// of "one" bits in sys.maxsize.
{ MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_INT(MP_SMALL_INT_MAX) },
#else
{ MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_PTR(&mp_maxsize_obj) },
{ MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_PTR(&mp_sys_maxsize_obj) },
#endif
#endif
@@ -252,7 +250,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_sys_globals, mp_module_sys_globals_table);
const mp_obj_module_t mp_module_sys = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_sys_globals,
.globals = (mp_obj_dict_t *)&mp_module_sys_globals,
};
#endif

View File

@@ -120,7 +120,7 @@ STATIC MP_DEFINE_CONST_DICT(thread_lock_locals_dict, thread_lock_locals_dict_tab
STATIC const mp_obj_type_t mp_type_thread_lock = {
{ &mp_type_type },
.name = MP_QSTR_lock,
.locals_dict = (mp_obj_dict_t*)&thread_lock_locals_dict,
.locals_dict = (mp_obj_dict_t *)&thread_lock_locals_dict,
};
/****************************************************************/
@@ -157,7 +157,7 @@ typedef struct _thread_entry_args_t {
STATIC void *thread_entry(void *args_in) {
// Execution begins here for a new thread. We do not have the GIL.
thread_entry_args_t *args = (thread_entry_args_t*)args_in;
thread_entry_args_t *args = (thread_entry_args_t *)args_in;
mp_state_thread_t ts;
mp_thread_set_state(&ts);
@@ -171,6 +171,11 @@ STATIC void *thread_entry(void *args_in) {
mp_pystack_init(mini_pystack, &mini_pystack[128]);
#endif
// The GC starts off unlocked on this thread.
ts.gc_lock_depth = 0;
ts.mp_pending_exception = MP_OBJ_NULL;
// set locals and globals from the calling context
mp_locals_set(args->dict_locals);
mp_globals_set(args->dict_globals);
@@ -181,7 +186,6 @@ STATIC void *thread_entry(void *args_in) {
mp_thread_start();
// TODO set more thread-specific state here:
// mp_pending_exception? (root pointer)
// cur_exception (root pointer)
DEBUG_printf("[thread] start ts=%p args=%p stack=%p\n", &ts, &args, MP_STATE_THREAD(stack_top));
@@ -193,7 +197,7 @@ STATIC void *thread_entry(void *args_in) {
} else {
// uncaught exception
// check for SystemExit
mp_obj_base_t *exc = (mp_obj_base_t*)nlr.ret_val;
mp_obj_base_t *exc = (mp_obj_base_t *)nlr.ret_val;
if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(exc->type), MP_OBJ_FROM_PTR(&mp_type_SystemExit))) {
// swallow exception silently
} else {
@@ -235,9 +239,9 @@ STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args)
} else {
// positional and keyword arguments
if (mp_obj_get_type(args[2]) != &mp_type_dict) {
mp_raise_TypeError("expecting a dict for keyword args");
mp_raise_TypeError(MP_ERROR_TEXT("expecting a dict for keyword args"));
}
mp_map_t *map = &((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[2]))->map;
mp_map_t *map = &((mp_obj_dict_t *)MP_OBJ_TO_PTR(args[2]))->map;
th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len + 2 * map->used);
th_args->n_kw = map->used;
// copy across the keyword arguments
@@ -249,7 +253,7 @@ STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args)
}
}
// copy agross the positional arguments
// copy across the positional arguments
th_args->n_args = pos_args_len;
memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t));
@@ -271,7 +275,7 @@ STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args)
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_start_new_thread_obj, 2, 3, mod_thread_start_new_thread);
STATIC mp_obj_t mod_thread_exit(void) {
nlr_raise(mp_obj_new_exception(&mp_type_SystemExit));
mp_raise_type(&mp_type_SystemExit);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_exit_obj, mod_thread_exit);
@@ -294,7 +298,7 @@ STATIC MP_DEFINE_CONST_DICT(mp_module_thread_globals, mp_module_thread_globals_t
const mp_obj_module_t mp_module_thread = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_thread_globals,
.globals = (mp_obj_dict_t *)&mp_module_thread_globals,
};
#endif // MICROPY_PY_THREAD

View File

@@ -63,9 +63,9 @@
#if MICROPY_PY_UERRNO_ERRORCODE
STATIC const mp_rom_map_elem_t errorcode_table[] = {
#define X(e) { MP_ROM_INT(MP_ ## e), MP_ROM_QSTR(MP_QSTR_## e) },
#define X(e) { MP_ROM_INT(MP_##e), MP_ROM_QSTR(MP_QSTR_##e) },
MICROPY_PY_UERRNO_LIST
#undef X
#undef X
};
STATIC const mp_obj_dict_t errorcode_dict = {
@@ -76,7 +76,7 @@ STATIC const mp_obj_dict_t errorcode_dict = {
.is_ordered = 1,
.used = MP_ARRAY_SIZE(errorcode_table),
.alloc = MP_ARRAY_SIZE(errorcode_table),
.table = (mp_map_elem_t*)(mp_rom_map_elem_t*)errorcode_table,
.table = (mp_map_elem_t *)(mp_rom_map_elem_t *)errorcode_table,
},
};
#endif
@@ -87,22 +87,22 @@ STATIC const mp_rom_map_elem_t mp_module_uerrno_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR_errorcode), MP_ROM_PTR(&errorcode_dict) },
#endif
#define X(e) { MP_ROM_QSTR(MP_QSTR_## e), MP_ROM_INT(MP_ ## e) },
#define X(e) { MP_ROM_QSTR(MP_QSTR_##e), MP_ROM_INT(MP_##e) },
MICROPY_PY_UERRNO_LIST
#undef X
#undef X
};
STATIC MP_DEFINE_CONST_DICT(mp_module_uerrno_globals, mp_module_uerrno_globals_table);
const mp_obj_module_t mp_module_uerrno = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t*)&mp_module_uerrno_globals,
.globals = (mp_obj_dict_t *)&mp_module_uerrno_globals,
};
qstr mp_errno_to_str(mp_obj_t errno_val) {
#if MICROPY_PY_UERRNO_ERRORCODE
// We have the errorcode dict so can do a lookup using the hash map
mp_map_elem_t *elem = mp_map_lookup((mp_map_t*)&errorcode_dict.map, errno_val, MP_MAP_LOOKUP);
mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&errorcode_dict.map, errno_val, MP_MAP_LOOKUP);
if (elem == NULL) {
return MP_QSTRnull;
} else {
@@ -119,4 +119,4 @@ qstr mp_errno_to_str(mp_obj_t errno_val) {
#endif
}
#endif //MICROPY_PY_UERRNO
#endif // MICROPY_PY_UERRNO

View File

@@ -28,14 +28,14 @@
// Current version of MicroPython
#define MICROPY_VERSION_MAJOR 1
#define MICROPY_VERSION_MINOR 12
#define MICROPY_VERSION_MINOR 17
#define MICROPY_VERSION_MICRO 0
// Combined version as a 32-bit number for convenience
#define MICROPY_VERSION ( \
MICROPY_VERSION_MAJOR << 16 \
| MICROPY_VERSION_MINOR << 8 \
| MICROPY_VERSION_MICRO)
| MICROPY_VERSION_MINOR << 8 \
| MICROPY_VERSION_MICRO)
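As a quick check of the packed value: for 1.17.0 the macro evaluates to (1 << 16) | (17 << 8) | 0 = 0x00011100, so version gates reduce to a plain integer comparison, e.g.:

#if MICROPY_VERSION < ((1 << 16) | (17 << 8) | 0)
#error "this code assumes MicroPython 1.17 or newer"
#endif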
// String version
#define MICROPY_VERSION_STRING \
@@ -70,25 +70,28 @@
// A MicroPython object is a machine word having the following form:
// - xxxx...xxx1 : a small int, bits 1 and above are the value
// - xxxx...xx10 : a qstr, bits 2 and above are the value
// - xxxx...x010 : a qstr, bits 3 and above are the value
// - xxxx...x110 : an immediate object, bits 3 and above are the value
// - xxxx...xx00 : a pointer to an mp_obj_base_t (unless a fake object)
#define MICROPY_OBJ_REPR_A (0)
// A MicroPython object is a machine word having the following form:
// - xxxx...xx01 : a small int, bits 2 and above are the value
// - xxxx...xx11 : a qstr, bits 2 and above are the value
// - xxxx...x011 : a qstr, bits 3 and above are the value
// - xxxx...x111 : an immediate object, bits 3 and above are the value
// - xxxx...xxx0 : a pointer to an mp_obj_base_t (unless a fake object)
#define MICROPY_OBJ_REPR_B (1)
// A MicroPython object is a machine word having the following form (called R):
// - iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int with 31-bit signed value
// - 01111111 1qqqqqqq qqqqqqqq qqqqq110 str with 20-bit qstr value
// - 01111111 1qqqqqqq qqqqqqqq qqqq0110 str with 19-bit qstr value
// - 01111111 10000000 00000000 ssss1110 immediate object with 4-bit value
// - s1111111 10000000 00000000 00000010 +/- inf
// - s1111111 1xxxxxxx xxxxxxxx xxxxx010 nan, x != 0
// - seeeeeee efffffff ffffffff ffffff10 30-bit fp, e != 0xff
// - pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
// Str and float stored as O = R + 0x80800000, retrieved as R = O - 0x80800000.
// This makes strs easier to encode/decode as they have zeros in the top 9 bits.
// Str, immediate and float stored as O = R + 0x80800000, retrieved as R = O - 0x80800000.
// This makes strs/immediates easier to encode/decode as they have zeros in the top 9 bits.
// This scheme only works with 32-bit word size and float enabled.
#define MICROPY_OBJ_REPR_C (2)
@@ -98,6 +101,7 @@
// - 01111111 11111000 00000000 00000000 00000000 00000000 00000000 00000000 normalised nan
// - 01111111 11111101 iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int
// - 01111111 11111110 00000000 00000000 qqqqqqqq qqqqqqqq qqqqqqqq qqqqqqq1 str
// - 01111111 11111111 ss000000 00000000 00000000 00000000 00000000 00000000 immediate object
// - 01111111 11111100 00000000 00000000 pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
// Stored as O = R + 0x8004000000000000, retrieved as R = O - 0x8004000000000000.
// This makes pointers have all zeros in the top 32 bits.
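To make the encodings above concrete, here is a minimal sketch of how a small int is boxed and unboxed under representation A; it mirrors what the core object macros do, but the type and helper names are illustrative only:

#include <stdint.h>

typedef uintptr_t obj_word_t;  // stand-in for mp_obj_t under REPR A

// Small int: the value lives in bits 1 and above, bit 0 is set to 1.
static inline obj_word_t repr_a_box_small_int(intptr_t value) {
    return ((obj_word_t)value << 1) | 1;
}

static inline int repr_a_is_small_int(obj_word_t o) {
    return (o & 1) != 0;
}

static inline intptr_t repr_a_unbox_small_int(obj_word_t o) {
    return (intptr_t)o >> 1;  // arithmetic shift restores the sign
}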
@@ -115,7 +119,7 @@
// Number of bytes in memory allocation/GC block. Any size allocated will be
// rounded up to be multiples of this.
#ifndef MICROPY_BYTES_PER_GC_BLOCK
#define MICROPY_BYTES_PER_GC_BLOCK (4 * BYTES_PER_WORD)
#define MICROPY_BYTES_PER_GC_BLOCK (4 * MP_BYTES_PER_OBJ_WORD)
#endif
// Number of words allocated (in BSS) to the GC stack (minimum is 1)
@@ -218,6 +222,11 @@
#define MICROPY_MODULE_DICT_SIZE (1)
#endif
// Initial size of sys.modules dict
#ifndef MICROPY_LOADED_MODULES_DICT_SIZE
#define MICROPY_LOADED_MODULES_DICT_SIZE (3)
#endif
// Whether realloc/free should be passed allocated memory region size
// You must enable this if MICROPY_MEM_STATS is enabled
#ifndef MICROPY_MALLOC_USES_ALLOCATED_SIZE
@@ -272,6 +281,11 @@
#define MICROPY_PERSISTENT_CODE_SAVE (0)
#endif
// Whether to support saving persistent code to a file via mp_raw_code_save_file
#ifndef MICROPY_PERSISTENT_CODE_SAVE_FILE
#define MICROPY_PERSISTENT_CODE_SAVE_FILE (0)
#endif
// Whether generated code can persist independently of the VM/runtime instance
// This is enabled automatically when needed by other features
#ifndef MICROPY_PERSISTENT_CODE
@@ -293,6 +307,11 @@
#define MICROPY_EMIT_THUMB (0)
#endif
// Whether to emit ARMv7-M instruction support in thumb native code
#ifndef MICROPY_EMIT_THUMB_ARMV7M
#define MICROPY_EMIT_THUMB_ARMV7M (1)
#endif
// Whether to enable the thumb inline assembler
#ifndef MICROPY_EMIT_INLINE_THUMB
#define MICROPY_EMIT_INLINE_THUMB (0)
@@ -340,6 +359,18 @@
// Convenience definition for whether any native or inline assembler emitter is enabled
#define MICROPY_EMIT_MACHINE_CODE (MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM)
// Whether native relocatable code loaded from .mpy files is explicitly tracked
// so that the GC cannot reclaim it. Needed on architectures that allocate
// executable memory on the MicroPython heap and don't explicitly track this
// data some other way.
#ifndef MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
#if !MICROPY_EMIT_MACHINE_CODE || defined(MP_PLAT_ALLOC_EXEC) || defined(MP_PLAT_COMMIT_EXEC)
#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (0)
#else
#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (1)
#endif
#endif
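The option above is only a flag; a rough sketch of the bookkeeping it enables, using the track_reloc_code_list root pointer added to the VM state later in this commit (the helper name is illustrative, not necessarily the one used by the loader):

#if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
// Keep a reference to relocated native code so the GC keeps it alive.
static void track_reloc_code(void *reloc_text) {
    if (MP_STATE_VM(track_reloc_code_list) == MP_OBJ_NULL) {
        MP_STATE_VM(track_reloc_code_list) = mp_obj_new_list(0, NULL);
    }
    mp_obj_list_append(MP_STATE_VM(track_reloc_code_list), MP_OBJ_FROM_PTR(reloc_text));
}
#endif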
/*****************************************************************************/
/* Compiler configuration */
@@ -431,6 +462,11 @@
#define MICROPY_DEBUG_MP_OBJ_SENTINELS (0)
#endif
// Whether to print parse rule names (rather than integers) in mp_parse_node_print
#ifndef MICROPY_DEBUG_PARSE_RULE_NAME
#define MICROPY_DEBUG_PARSE_RULE_NAME (0)
#endif
// Whether to enable a simple VM stack overflow check
#ifndef MICROPY_DEBUG_VM_STACK_OVERFLOW
#define MICROPY_DEBUG_VM_STACK_OVERFLOW (0)
@@ -440,7 +476,8 @@
/* Optimisations */
// Whether to use computed gotos in the VM, or a switch
// Computed gotos are roughly 10% faster, and increase VM code size by a little
// Computed gotos are roughly 10% faster, and increase VM code size by a little,
// e.g. ~1kiB on Cortex M4.
// Note: enabling this will use the gcc-specific extensions of ranged designated
// initialisers and addresses of labels, which are not part of the C99 standard.
#ifndef MICROPY_OPT_COMPUTED_GOTO
@@ -507,6 +544,12 @@
#define MICROPY_VM_HOOK_RETURN
#endif
// Hook for mp_sched_schedule when a function gets scheduled on sched_queue
// (this macro executes within an atomic section)
#ifndef MICROPY_SCHED_HOOK_SCHEDULED
#define MICROPY_SCHED_HOOK_SCHEDULED
#endif
// Whether to include the garbage collector
#ifndef MICROPY_ENABLE_GC
#define MICROPY_ENABLE_GC (0)
@@ -539,9 +582,9 @@
#define MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF (0)
#endif
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
# ifndef MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
# define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (0) // 0 - implies dynamic allocation
# endif
#ifndef MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
#define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (0) // 0 - implies dynamic allocation
#endif
#endif
// Whether to provide the mp_kbd_exception object, and micropython.kbd_intr function
@@ -560,11 +603,31 @@
#define MICROPY_HELPER_REPL (0)
#endif
// Allow enabling debug prints after each REPL line
#ifndef MICROPY_REPL_INFO
#define MICROPY_REPL_INFO (0)
#endif
// Whether to include emacs-style readline behavior in REPL
#ifndef MICROPY_REPL_EMACS_KEYS
#define MICROPY_REPL_EMACS_KEYS (0)
#endif
// Whether to include emacs-style word movement/kill readline behavior in REPL.
// This adds Alt+F, Alt+B, Alt+D and Alt+Backspace for forward-word, backward-word, forward-kill-word
// and backward-kill-word, respectively.
#ifndef MICROPY_REPL_EMACS_WORDS_MOVE
#define MICROPY_REPL_EMACS_WORDS_MOVE (0)
#endif
// Whether to include extra convenience keys for word movement/kill in readline REPL.
// This adds Ctrl+Right, Ctrl+Left and Ctrl+W for forward-word, backward-word and backward-kill-word
// respectively. Ctrl+Delete is not implemented because it's a very different escape sequence.
// Depends on MICROPY_REPL_EMACS_WORDS_MOVE.
#ifndef MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE
#define MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE (0)
#endif
// Whether to implement auto-indent in REPL
#ifndef MICROPY_REPL_AUTO_INDENT
#define MICROPY_REPL_AUTO_INDENT (0)
@@ -604,6 +667,8 @@ typedef long long mp_longint_impl_t;
#define MICROPY_ENABLE_DOC_STRING (0)
#endif
// Exception messages are removed (requires disabling MICROPY_ROM_TEXT_COMPRESSION)
#define MICROPY_ERROR_REPORTING_NONE (0)
// Exception messages are short static strings
#define MICROPY_ERROR_REPORTING_TERSE (1)
// Exception messages provide basic error details
@@ -698,7 +763,7 @@ typedef double mp_float_t;
// Whether to support module-level __getattr__ (see PEP 562)
#ifndef MICROPY_MODULE_GETATTR
#define MICROPY_MODULE_GETATTR (0)
#define MICROPY_MODULE_GETATTR (1)
#endif
// Whether module weak links are supported
@@ -804,6 +869,16 @@ typedef double mp_float_t;
#define MICROPY_PY_ASYNC_AWAIT (1)
#endif
// Support for literal string interpolation, f-strings (see PEP 498, Python 3.6+)
#ifndef MICROPY_PY_FSTRINGS
#define MICROPY_PY_FSTRINGS (0)
#endif
// Support for assignment expressions with := (see PEP 572, Python 3.8+)
#ifndef MICROPY_PY_ASSIGN_EXPR
#define MICROPY_PY_ASSIGN_EXPR (1)
#endif
// Non-standard .pend_throw() method for generators, allowing for
// Future-like behavior with respect to exception handling: an
// exception set with .pend_throw() will activate on the next call
@@ -830,7 +905,7 @@ typedef double mp_float_t;
// Whether str.center() method provided
#ifndef MICROPY_PY_BUILTINS_STR_CENTER
#define MICROPY_PY_BUILTINS_STR_CENTER (1)
#define MICROPY_PY_BUILTINS_STR_CENTER (0)
#endif
// Whether str.count() method provided
@@ -850,7 +925,7 @@ typedef double mp_float_t;
// Whether str.splitlines() method provided
#ifndef MICROPY_PY_BUILTINS_STR_SPLITLINES
#define MICROPY_PY_BUILTINS_STR_SPLITLINES (1)
#define MICROPY_PY_BUILTINS_STR_SPLITLINES (0)
#endif
// Whether to support bytearray object
@@ -889,6 +964,11 @@ typedef double mp_float_t;
#define MICROPY_PY_BUILTINS_SLICE_ATTRS (0)
#endif
// Whether to support the .indices(len) method on slice objects
#ifndef MICROPY_PY_BUILTINS_SLICE_INDICES
#define MICROPY_PY_BUILTINS_SLICE_INDICES (0)
#endif
// Whether to support frozenset object
#ifndef MICROPY_PY_BUILTINS_FROZENSET
#define MICROPY_PY_BUILTINS_FROZENSET (0)
@@ -923,31 +1003,26 @@ typedef double mp_float_t;
#define MICROPY_PY_BUILTINS_ROUND_INT (0)
#endif
// Whether to support timeout exceptions (like socket.timeout)
#ifndef MICROPY_PY_BUILTINS_TIMEOUTERROR
#define MICROPY_PY_BUILTINS_TIMEOUTERROR (0)
#endif
// Whether to support complete set of special methods for user
// classes, or only the most used ones. "Inplace" methods are
// controlled by MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS below.
// "Reverse" methods are controlled by
// MICROPY_PY_REVERSE_SPECIAL_METHODS below.
#ifndef MICROPY_PY_ALL_SPECIAL_METHODS
#define MICROPY_PY_ALL_SPECIAL_METHODS (1)
#define MICROPY_PY_ALL_SPECIAL_METHODS (0)
#endif
// Whether to support all inplace arithmetic operation methods
// (__imul__, etc.)
#ifndef MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
#define MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS (1)
#define MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS (0)
#endif
// Whether to support reverse arithmetic operation methods
// (__radd__, etc.). Additionally gated by
// MICROPY_PY_ALL_SPECIAL_METHODS.
#ifndef MICROPY_PY_REVERSE_SPECIAL_METHODS
#define MICROPY_PY_REVERSE_SPECIAL_METHODS (1)
#define MICROPY_PY_REVERSE_SPECIAL_METHODS (0)
#endif
// Whether to support compile function
@@ -987,7 +1062,7 @@ typedef double mp_float_t;
#endif
// Whether to provide the built-in input() function. The implementation of this
// uses mp-readline, so can only be enabled if the port uses this readline.
// uses shared/readline, so can only be enabled if the port uses this readline.
#ifndef MICROPY_PY_BUILTINS_INPUT
#define MICROPY_PY_BUILTINS_INPUT (0)
#endif
@@ -1033,6 +1108,11 @@ typedef double mp_float_t;
#define MICROPY_PY_MICROPYTHON_STACK_USE (MICROPY_PY_MICROPYTHON_MEM_INFO)
#endif
// Whether to provide the "micropython.heap_locked" function
#ifndef MICROPY_PY_MICROPYTHON_HEAP_LOCKED
#define MICROPY_PY_MICROPYTHON_HEAP_LOCKED (0)
#endif
// Whether to provide "array" module. Note that large chunk of the
// underlying code is shared with "bytearray" builtin type, so to
// get real savings, it should be disabled too.
@@ -1092,6 +1172,26 @@ typedef double mp_float_t;
#define MICROPY_PY_MATH_ISCLOSE (0)
#endif
// Whether to provide fix for atan2 Inf handling.
#ifndef MICROPY_PY_MATH_ATAN2_FIX_INFNAN
#define MICROPY_PY_MATH_ATAN2_FIX_INFNAN (0)
#endif
// Whether to provide fix for fmod Inf handling.
#ifndef MICROPY_PY_MATH_FMOD_FIX_INFNAN
#define MICROPY_PY_MATH_FMOD_FIX_INFNAN (0)
#endif
// Whether to provide fix for modf negative zero handling.
#ifndef MICROPY_PY_MATH_MODF_FIX_NEGZERO
#define MICROPY_PY_MATH_MODF_FIX_NEGZERO (0)
#endif
// Whether to provide fix for pow(1, NaN) and pow(NaN, 0), which both should be 1 not NaN.
#ifndef MICROPY_PY_MATH_POW_FIX_NAN
#define MICROPY_PY_MATH_POW_FIX_NAN (0)
#endif
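These four math fixes all default to 0; a port that wants CPython-compatible corner-case behaviour would opt in from its port configuration header, along these lines:

// mpconfigport.h (port-specific): opt in to the CPython-compatible corner cases
#define MICROPY_PY_MATH_ATAN2_FIX_INFNAN (1)
#define MICROPY_PY_MATH_FMOD_FIX_INFNAN  (1)
#define MICROPY_PY_MATH_MODF_FIX_NEGZERO (1)
#define MICROPY_PY_MATH_POW_FIX_NAN      (1)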
// Whether to provide "cmath" module
#ifndef MICROPY_PY_CMATH
#define MICROPY_PY_CMATH (0)
@@ -1215,6 +1315,13 @@ typedef double mp_float_t;
#define MICROPY_PY_USELECT (0)
#endif
// Whether to enable the select() function in the "uselect" module (baremetal
// implementation). This is present for compatibility but can be disabled to
// save space.
#ifndef MICROPY_PY_USELECT_SELECT
#define MICROPY_PY_USELECT_SELECT (1)
#endif
// Whether to provide "utime" module functions implementation
// in terms of mp_hal_* functions.
#ifndef MICROPY_PY_UTIME_MP_HAL
@@ -1250,6 +1357,10 @@ typedef double mp_float_t;
// Extended modules
#ifndef MICROPY_PY_UASYNCIO
#define MICROPY_PY_UASYNCIO (0)
#endif
#ifndef MICROPY_PY_UCTYPES
#define MICROPY_PY_UCTYPES (0)
#endif
@@ -1268,6 +1379,11 @@ typedef double mp_float_t;
#define MICROPY_PY_UJSON (0)
#endif
// Whether to support the "separators" argument to dump, dumps
#ifndef MICROPY_PY_UJSON_SEPARATORS
#define MICROPY_PY_UJSON_SEPARATORS (1)
#endif
#ifndef MICROPY_PY_URE
#define MICROPY_PY_URE (0)
#endif
@@ -1348,6 +1464,11 @@ typedef double mp_float_t;
#define MICROPY_PY_MACHINE (0)
#endif
// Whether to include: bitstream
#ifndef MICROPY_PY_MACHINE_BITSTREAM
#define MICROPY_PY_MACHINE_BITSTREAM (0)
#endif
// Whether to include: time_pulse_us
#ifndef MICROPY_PY_MACHINE_PULSE
#define MICROPY_PY_MACHINE_PULSE (0)
@@ -1402,6 +1523,21 @@ typedef double mp_float_t;
#define MICROPY_PORT_ROOT_POINTERS
#endif
/*****************************************************************************/
/* Hooks for a port to wrap functions with attributes */
#ifndef MICROPY_WRAP_MP_SCHED_EXCEPTION
#define MICROPY_WRAP_MP_SCHED_EXCEPTION(f) f
#endif
#ifndef MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT
#define MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT(f) f
#endif
#ifndef MICROPY_WRAP_MP_SCHED_SCHEDULE
#define MICROPY_WRAP_MP_SCHED_SCHEDULE(f) f
#endif
/*****************************************************************************/
/* Miscellaneous settings */
@@ -1427,15 +1563,17 @@ typedef double mp_float_t;
#define STATIC static
#endif
// Number of bytes in a word
#ifndef BYTES_PER_WORD
#define BYTES_PER_WORD (sizeof(mp_uint_t))
// Number of bytes in an object word: mp_obj_t, mp_uint_t, mp_uint_t
#ifndef MP_BYTES_PER_OBJ_WORD
#define MP_BYTES_PER_OBJ_WORD (sizeof(mp_uint_t))
#endif
#define BITS_PER_BYTE (8)
#define BITS_PER_WORD (BITS_PER_BYTE * BYTES_PER_WORD)
// Number of bits in a byte
#ifndef MP_BITS_PER_BYTE
#define MP_BITS_PER_BYTE (8)
#endif
// mp_int_t value with most significant bit set
#define WORD_MSBIT_HIGH (((mp_uint_t)1) << (BYTES_PER_WORD * 8 - 1))
#define MP_OBJ_WORD_MSBIT_HIGH (((mp_uint_t)1) << (MP_BYTES_PER_OBJ_WORD * MP_BITS_PER_BYTE - 1))
// Make sure both MP_ENDIANNESS_LITTLE and MP_ENDIANNESS_BIG are
// defined and that they are the opposite of each other.
@@ -1444,7 +1582,7 @@ typedef double mp_float_t;
#elif defined(MP_ENDIANNESS_BIG)
#define MP_ENDIANNESS_LITTLE (!MP_ENDIANNESS_BIG)
#else
// Endianness not defined by port so try to autodetect it.
// Endianness not defined by port so try to autodetect it.
#if defined(__BYTE_ORDER__)
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define MP_ENDIANNESS_LITTLE (1)
@@ -1507,7 +1645,7 @@ typedef double mp_float_t;
#define UINT_FMT "%u"
#define INT_FMT "%d"
#endif
#endif //INT_FMT
#endif // INT_FMT
// Modifier for function which doesn't return
#ifndef NORETURN
@@ -1548,35 +1686,42 @@ typedef double mp_float_t;
#endif
#endif
// Explicitly annotate switch case fall throughs
#if defined(__GNUC__) && __GNUC__ >= 7
#define MP_FALLTHROUGH __attribute__((fallthrough));
#else
#define MP_FALLTHROUGH
#endif
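Typical use of the new annotation is at the end of a case that intentionally falls into the next one (the GCC definition already carries the trailing semicolon). A small illustrative fragment, with made-up case values:

static int classify(int op) {
    switch (op) {
        case 0:
            // work shared with the next case
            MP_FALLTHROUGH
        case 1:
            return 1;
        default:
            return 0;
    }
}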
#ifndef MP_HTOBE16
#if MP_ENDIANNESS_LITTLE
# define MP_HTOBE16(x) ((uint16_t)( (((x) & 0xff) << 8) | (((x) >> 8) & 0xff) ))
# define MP_BE16TOH(x) MP_HTOBE16(x)
#define MP_HTOBE16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))
#define MP_BE16TOH(x) MP_HTOBE16(x)
#else
# define MP_HTOBE16(x) (x)
# define MP_BE16TOH(x) (x)
#define MP_HTOBE16(x) (x)
#define MP_BE16TOH(x) (x)
#endif
#endif
#ifndef MP_HTOBE32
#if MP_ENDIANNESS_LITTLE
# define MP_HTOBE32(x) ((uint32_t)( (((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | (((x) >> 8) & 0xff00) | (((x) >> 24) & 0xff) ))
# define MP_BE32TOH(x) MP_HTOBE32(x)
#define MP_HTOBE32(x) ((uint32_t)((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | (((x) >> 8) & 0xff00) | (((x) >> 24) & 0xff)))
#define MP_BE32TOH(x) MP_HTOBE32(x)
#else
# define MP_HTOBE32(x) (x)
# define MP_BE32TOH(x) (x)
#define MP_HTOBE32(x) (x)
#define MP_BE32TOH(x) (x)
#endif
#endif
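On a little-endian build these macros swap bytes; on a big-endian build they are the identity. For the little-endian case:

//   MP_HTOBE16(0x1234)     -> 0x3412
//   MP_HTOBE32(0x12345678) -> 0x78563412
uint16_t be_port = MP_HTOBE16(8883);   // e.g. emit a port number in network byte order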
// Warning categories are by default implemented as strings, though
// hook is left for a port to define them as something else.
#if MICROPY_WARNINGS_CATEGORY
# ifndef MP_WARN_CAT
# define MP_WARN_CAT(x) #x
# endif
#ifndef MP_WARN_CAT
#define MP_WARN_CAT(x) #x
#endif
#else
# undef MP_WARN_CAT
# define MP_WARN_CAT(x) (NULL)
#undef MP_WARN_CAT
#define MP_WARN_CAT(x) (NULL)
#endif
// Feature dependency check.

View File

@@ -81,6 +81,7 @@
#define MP_EHOSTUNREACH (113) // No route to host
#define MP_EALREADY (114) // Operation already in progress
#define MP_EINPROGRESS (115) // Operation now in progress
#define MP_ECANCELED (125) // Operation canceled
#else
@@ -136,6 +137,7 @@
#define MP_EHOSTUNREACH EHOSTUNREACH
#define MP_EALREADY EALREADY
#define MP_EINPROGRESS EINPROGRESS
#define MP_ECANCELED ECANCELED
#endif

View File

@@ -26,6 +26,7 @@
#ifndef MICROPY_INCLUDED_PY_MPHAL_H
#define MICROPY_INCLUDED_PY_MPHAL_H
#include <stdint.h>
#include "py/mpconfig.h"
#ifdef MICROPY_MPHALPORT_H
@@ -74,6 +75,11 @@ mp_uint_t mp_hal_ticks_us(void);
mp_uint_t mp_hal_ticks_cpu(void);
#endif
#ifndef mp_hal_time_ns
// Nanoseconds since the Epoch.
uint64_t mp_hal_time_ns(void);
#endif
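A port that does not define its own mp_hal_time_ns must now provide this function. A minimal sketch for a port whose RTC only exposes whole seconds (rtc_get_seconds() is a hypothetical port helper):

uint64_t mp_hal_time_ns(void) {
    return (uint64_t)rtc_get_seconds() * 1000000000ULL;
}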
// If port HAL didn't define its own pin API, use generic
// "virtual pin" API from the core.
#ifndef mp_hal_pin_obj_t

View File

@@ -269,14 +269,14 @@ int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char
// We add the pad in this function, so since the pad goes after
// the sign & prefix, we format without a prefix
str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
x, base, NULL, base_char, comma);
x, base, NULL, base_char, comma);
if (*str == '-') {
sign = *str++;
fmt_size--;
}
} else {
str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
x, base, prefix, base_char, comma);
x, base, prefix, base_char, comma);
}
int spaces_before = 0;
@@ -347,8 +347,7 @@ int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, c
if (flags & PF_FLAG_SHOW_SIGN) {
sign = '+';
}
else
} else
if (flags & PF_FLAG_SPACE_SIGN) {
sign = ' ';
}
@@ -411,14 +410,20 @@ int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
int flags = 0;
char fill = ' ';
while (*fmt != '\0') {
if (*fmt == '-') flags |= PF_FLAG_LEFT_ADJUST;
else if (*fmt == '+') flags |= PF_FLAG_SHOW_SIGN;
else if (*fmt == ' ') flags |= PF_FLAG_SPACE_SIGN;
else if (*fmt == '!') flags |= PF_FLAG_NO_TRAILZ;
else if (*fmt == '0') {
if (*fmt == '-') {
flags |= PF_FLAG_LEFT_ADJUST;
} else if (*fmt == '+') {
flags |= PF_FLAG_SHOW_SIGN;
} else if (*fmt == ' ') {
flags |= PF_FLAG_SPACE_SIGN;
} else if (*fmt == '!') {
flags |= PF_FLAG_NO_TRAILZ;
} else if (*fmt == '0') {
flags |= PF_FLAG_PAD_AFTER_SIGN;
fill = '0';
} else break;
} else {
break;
}
++fmt;
}
@@ -470,26 +475,23 @@ int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
chrs += mp_print_strn(print, "false", 5, flags, fill, width);
}
break;
case 'c':
{
case 'c': {
char str = va_arg(args, int);
chrs += mp_print_strn(print, &str, 1, flags, fill, width);
break;
}
case 'q':
{
case 'q': {
qstr qst = va_arg(args, qstr);
size_t len;
const char *str = (const char*)qstr_data(qst, &len);
if (prec < 0) {
prec = len;
const char *str = (const char *)qstr_data(qst, &len);
if (prec >= 0 && (size_t)prec < len) {
len = prec;
}
chrs += mp_print_strn(print, str, prec, flags, fill, width);
chrs += mp_print_strn(print, str, len, flags, fill, width);
break;
}
case 's':
{
const char *str = va_arg(args, const char*);
case 's': {
const char *str = va_arg(args, const char *);
#ifndef NDEBUG
// With debugging enabled, catch printing of null string pointers
if (prec != 0 && str == NULL) {
@@ -497,10 +499,11 @@ int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
break;
}
#endif
if (prec < 0) {
prec = strlen(str);
size_t len = strlen(str);
if (prec >= 0 && (size_t)prec < len) {
len = prec;
}
chrs += mp_print_strn(print, str, prec, flags, fill, width);
chrs += mp_print_strn(print, str, len, flags, fill, width);
break;
}
case 'd': {
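The net effect of the %q and %s changes above is that the precision is now clamped to the actual string/qstr length, so an over-large precision can no longer read past the end of the data. For example:

// Precision larger than the string no longer over-reads: prints "buf".
mp_printf(&mp_plat_print, "%.10s\n", "buf");
// Precision smaller than the string still truncates: prints "buf".
mp_printf(&mp_plat_print, "%.3s\n", "buffer");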
@@ -532,35 +535,32 @@ int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
// Use unsigned long int to work on both ILP32 and LP64 systems
chrs += mp_print_int(print, va_arg(args, unsigned long int), 0, 16, 'a', flags, fill, width);
break;
#if MICROPY_PY_BUILTINS_FLOAT
#if MICROPY_PY_BUILTINS_FLOAT
case 'e':
case 'E':
case 'f':
case 'F':
case 'g':
case 'G':
{
#if ((MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT) || (MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE))
mp_float_t f = va_arg(args, double);
case 'G': {
#if ((MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT) || (MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE))
mp_float_t f = (mp_float_t)va_arg(args, double);
chrs += mp_print_float(print, f, *fmt, flags, fill, width, prec);
#else
#error Unknown MICROPY FLOAT IMPL
#endif
#else
#error Unknown MICROPY FLOAT IMPL
#endif
break;
}
#endif
// Because 'l' is eaten above, another 'l' means %ll. We need to support
// this length specifier for OBJ_REPR_D (64-bit NaN boxing).
// TODO Either enable this unconditionally, or provide a specific config var.
#endif
// Because 'l' is eaten above, another 'l' means %ll. We need to support
// this length specifier for OBJ_REPR_D (64-bit NaN boxing).
// TODO Either enable this unconditionally, or provide a specific config var.
#if (MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D) || defined(_WIN64)
case 'l': {
unsigned long long int arg_value = va_arg(args, unsigned long long int);
++fmt;
if (*fmt == 'u' || *fmt == 'd') {
chrs += mp_print_int(print, arg_value, *fmt == 'd', 10, 'a', flags, fill, width);
break;
}
assert(!"unsupported fmt char");
assert(*fmt == 'u' || *fmt == 'd' || !"unsupported fmt char");
chrs += mp_print_int(print, arg_value, *fmt == 'd', 10, 'a', flags, fill, width);
break;
}
#endif
default:

View File

@@ -40,9 +40,9 @@
#define PF_FLAG_SHOW_OCTAL_LETTER (0x200)
#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
# define MP_PYTHON_PRINTER &mp_sys_stdout_print
#define MP_PYTHON_PRINTER &mp_sys_stdout_print
#else
# define MP_PYTHON_PRINTER &mp_plat_print
#define MP_PYTHON_PRINTER &mp_plat_print
#endif
typedef void (*mp_print_strn_t)(void *data, const char *str, size_t len);
@@ -52,6 +52,14 @@ typedef struct _mp_print_t {
mp_print_strn_t print_strn;
} mp_print_t;
typedef struct _mp_print_ext_t {
mp_print_t base;
const char *item_separator;
const char *key_separator;
} mp_print_ext_t;
#define MP_PRINT_GET_EXT(print) ((mp_print_ext_t *)print)
// All (non-debug) prints go through one of the two interfaces below.
// 1) Wrapper for platform print function, which wraps MP_PLAT_PRINT_STRN.
extern const mp_print_t mp_plat_print;
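The extended printer carries the separators needed by the new ujson "separators" argument (gated by MICROPY_PY_UJSON_SEPARATORS earlier in this commit). A rough sketch of how a caller sets one up, along the lines of what the JSON dump path does, using CPython's default separators:

static void print_json_with_separators(mp_obj_t obj) {
    mp_print_ext_t print_ext;
    print_ext.base = mp_plat_print;       // any mp_print_t backend works here
    print_ext.item_separator = ", ";      // CPython's json.dumps defaults
    print_ext.key_separator = ": ";
    // Printers in JSON mode recover the separators via MP_PRINT_GET_EXT(print).
    mp_obj_print_helper(&print_ext.base, obj, PRINT_JSON);
}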

View File

@@ -80,7 +80,6 @@ typedef struct _mp_state_mem_t {
int gc_stack_overflow;
MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
uint16_t gc_lock_depth;
// This variable controls auto garbage collection. If set to 0 then the
// GC won't automatically run when gc_alloc can't find enough blocks. But
@@ -98,7 +97,7 @@ typedef struct _mp_state_mem_t {
size_t gc_collected;
#endif
#if MICROPY_PY_THREAD
#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
// This is a global mutex used to make the GC thread-safe.
mp_thread_mutex_t gc_mutex;
#endif
@@ -138,9 +137,6 @@ typedef struct _mp_state_vm_t {
// dictionary with loaded modules (may be exposed as sys.modules)
mp_obj_dict_t mp_loaded_modules_dict;
// pending exception object (MP_OBJ_NULL if not pending)
volatile mp_obj_t mp_pending_exception;
#if MICROPY_ENABLE_SCHEDULER
mp_sched_item_t sched_queue[MICROPY_SCHEDULER_DEPTH];
#endif
@@ -167,6 +163,11 @@ typedef struct _mp_state_vm_t {
mp_obj_dict_t *mp_module_builtins_override_dict;
#endif
#if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
// An mp_obj_list_t that tracks relocated native code to prevent the GC from reclaiming them.
mp_obj_t track_reloc_code_list;
#endif
// include any root pointers defined by a port
MICROPY_PORT_ROOT_POINTERS
@@ -203,7 +204,7 @@ typedef struct _mp_state_vm_t {
size_t qstr_last_alloc;
size_t qstr_last_used;
#if MICROPY_PY_THREAD
#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
// This is a global mutex used to make qstr interning thread-safe.
mp_thread_mutex_t qstr_mutex;
#endif
@@ -248,6 +249,9 @@ typedef struct _mp_state_thread_t {
uint8_t *pystack_cur;
#endif
// Locking of the GC is done per thread.
uint16_t gc_lock_depth;
////////////////////////////////////////////////////////////
// START ROOT POINTER SECTION
// Everything that needs GC scanning must start here, and
@@ -259,6 +263,12 @@ typedef struct _mp_state_thread_t {
nlr_buf_t *nlr_top;
// pending exception object (MP_OBJ_NULL if not pending)
volatile mp_obj_t mp_pending_exception;
// If MP_OBJ_STOP_ITERATION is propagated then this holds its argument.
mp_obj_t stop_iteration_arg;
#if MICROPY_PY_SYS_SETTRACE
mp_obj_t prof_trace_callback;
bool prof_callback_is_executing;
@@ -278,12 +288,13 @@ extern mp_state_ctx_t mp_state_ctx;
#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
#define MP_STATE_MAIN_THREAD(x) (mp_state_ctx.thread.x)
#if MICROPY_PY_THREAD
extern mp_state_thread_t *mp_thread_get_state(void);
#define MP_STATE_THREAD(x) (mp_thread_get_state()->x)
#else
#define MP_STATE_THREAD(x) (mp_state_ctx.thread.x)
#define MP_STATE_THREAD(x) MP_STATE_MAIN_THREAD(x)
#endif
#endif // MICROPY_INCLUDED_PY_MPSTATE_H

View File

@@ -30,17 +30,17 @@
#if MICROPY_PY_THREAD
struct _mp_state_thread_t;
#ifdef MICROPY_MPTHREADPORT_H
#include MICROPY_MPTHREADPORT_H
#else
#include <mpthreadport.h>
#endif
struct _mp_state_thread_t;
struct _mp_state_thread_t *mp_thread_get_state(void);
void mp_thread_set_state(void *state);
void mp_thread_create(void *(*entry)(void*), void *arg, size_t *stack_size);
void mp_thread_set_state(struct _mp_state_thread_t *state);
void mp_thread_create(void *(*entry)(void *), void *arg, size_t *stack_size);
void mp_thread_start(void);
void mp_thread_finish(void);
void mp_thread_mutex_init(mp_thread_mutex_t *mutex);

View File

@@ -60,13 +60,21 @@ STATIC size_t mpn_remove_trailing_zeros(mpz_dig_t *oidig, mpz_dig_t *idig) {
assumes i, j are normalised
*/
STATIC int mpn_cmp(const mpz_dig_t *idig, size_t ilen, const mpz_dig_t *jdig, size_t jlen) {
if (ilen < jlen) { return -1; }
if (ilen > jlen) { return 1; }
if (ilen < jlen) {
return -1;
}
if (ilen > jlen) {
return 1;
}
for (idig += ilen, jdig += ilen; ilen > 0; --ilen) {
mpz_dbl_dig_signed_t cmp = (mpz_dbl_dig_t)*(--idig) - (mpz_dbl_dig_t)*(--jdig);
if (cmp < 0) { return -1; }
if (cmp > 0) { return 1; }
if (cmp < 0) {
return -1;
}
if (cmp > 0) {
return 1;
}
}
return 0;
@@ -228,7 +236,7 @@ STATIC size_t mpn_and(mpz_dig_t *idig, const mpz_dig_t *jdig, const mpz_dig_t *k
can have i, j, k pointing to same memory
*/
STATIC size_t mpn_and_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dig_t *oidig = idig;
mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
@@ -289,7 +297,7 @@ STATIC size_t mpn_or(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const
#if MICROPY_OPT_MPZ_BITWISE
STATIC size_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dig_t *oidig = idig;
mpz_dbl_dig_t carryi = 1;
mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
@@ -319,7 +327,7 @@ STATIC size_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, co
#else
STATIC size_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dig_t *oidig = idig;
mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
@@ -378,7 +386,7 @@ STATIC size_t mpn_xor(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const
can have i, j, k pointing to same memory
*/
STATIC size_t mpn_xor_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
mpz_dig_t *oidig = idig;
for (; jlen > 0; ++idig, ++jdig) {
@@ -523,60 +531,37 @@ STATIC void mpn_div(mpz_dig_t *num_dig, size_t *num_len, const mpz_dig_t *den_di
quo /= lead_den_digit;
// Multiply quo by den and subtract from num to get remainder.
// We have different code here to handle different compile-time
// configurations of mpz:
//
// 1. DIG_SIZE is stricly less than half the number of bits
// available in mpz_dbl_dig_t. In this case we can use a
// slightly more optimal (in time and space) routine that
// uses the extra bits in mpz_dbl_dig_signed_t to store a
// sign bit.
//
// 2. DIG_SIZE is exactly half the number of bits available in
// mpz_dbl_dig_t. In this (common) case we need to be careful
// not to overflow the borrow variable. And the shifting of
// borrow needs some special logic (it's a shift right with
// round up).
//
// Must be careful with overflow of the borrow variable. Both
// borrow and low_digs are signed values and need signed right-shift,
// but x is unsigned and may take a full-range value.
const mpz_dig_t *d = den_dig;
mpz_dbl_dig_t d_norm = 0;
mpz_dbl_dig_t borrow = 0;
mpz_dbl_dig_signed_t borrow = 0;
for (mpz_dig_t *n = num_dig - den_len; n < num_dig; ++n, ++d) {
// Get the next digit in (den).
d_norm = ((mpz_dbl_dig_t)*d << norm_shift) | (d_norm >> DIG_SIZE);
// Multiply the next digit in (quo * den).
mpz_dbl_dig_t x = (mpz_dbl_dig_t)quo * (d_norm & DIG_MASK);
#if DIG_SIZE < MPZ_DBL_DIG_SIZE / 2
borrow += (mpz_dbl_dig_t)*n - x; // will overflow if DIG_SIZE >= MPZ_DBL_DIG_SIZE/2
*n = borrow & DIG_MASK;
borrow = (mpz_dbl_dig_signed_t)borrow >> DIG_SIZE;
#else // DIG_SIZE == MPZ_DBL_DIG_SIZE / 2
if (x >= *n || *n - x <= borrow) {
borrow += x - (mpz_dbl_dig_t)*n;
*n = (-borrow) & DIG_MASK;
borrow = (borrow >> DIG_SIZE) + ((borrow & DIG_MASK) == 0 ? 0 : 1); // shift-right with round-up
} else {
*n = ((mpz_dbl_dig_t)*n - x - borrow) & DIG_MASK;
borrow = 0;
}
#endif
// Compute the low DIG_MASK bits of the next digit in (num - quo * den)
mpz_dbl_dig_signed_t low_digs = (borrow & DIG_MASK) + *n - (x & DIG_MASK);
// Store the digit result for (num).
*n = low_digs & DIG_MASK;
// Compute the borrow, shifted right before summing to avoid overflow.
borrow = (borrow >> DIG_SIZE) - (x >> DIG_SIZE) + (low_digs >> DIG_SIZE);
}
#if DIG_SIZE < MPZ_DBL_DIG_SIZE / 2
// Borrow was negative in the above for-loop, make it positive for next if-block.
borrow = -borrow;
#endif
// At this point we have either:
//
// 1. quo was the correct value and the most-sig-digit of num is exactly
// cancelled by borrow (borrow == *num_dig). In this case there is
// cancelled by borrow (borrow + *num_dig == 0). In this case there is
// nothing more to do.
//
// 2. quo was too large, we subtracted too many den from num, and the
// most-sig-digit of num is 1 less than borrow (borrow == *num_dig + 1).
// most-sig-digit of num is less than needed (borrow + *num_dig < 0).
// In this case we must reduce quo and add back den to num until the
// carry from this operation cancels out the borrow.
//
borrow -= *num_dig;
borrow += *num_dig;
for (; borrow != 0; --quo) {
d = den_dig;
d_norm = 0;
@@ -587,7 +572,7 @@ STATIC void mpn_div(mpz_dig_t *num_dig, size_t *num_len, const mpz_dig_t *den_di
*n = carry & DIG_MASK;
carry >>= DIG_SIZE;
}
borrow -= carry;
borrow += carry;
}
// store this digit of the quotient
@@ -756,7 +741,7 @@ void mpz_set_from_ll(mpz_t *z, long long val, bool is_signed) {
unsigned long long uval;
if (is_signed && val < 0) {
z->neg = 1;
uval = -val;
uval = -(unsigned long long)val;
} else {
z->neg = 0;
uval = val;
@@ -771,20 +756,7 @@ void mpz_set_from_ll(mpz_t *z, long long val, bool is_signed) {
#if MICROPY_PY_BUILTINS_FLOAT
void mpz_set_from_float(mpz_t *z, mp_float_t src) {
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
typedef uint64_t mp_float_int_t;
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
typedef uint32_t mp_float_int_t;
#endif
union {
mp_float_t f;
#if MP_ENDIANNESS_LITTLE
struct { mp_float_int_t frc:MP_FLOAT_FRAC_BITS, exp:MP_FLOAT_EXP_BITS, sgn:1; } p;
#else
struct { mp_float_int_t sgn:1, exp:MP_FLOAT_EXP_BITS, frc:MP_FLOAT_FRAC_BITS; } p;
#endif
} u = {src};
mp_float_union_t u = {src};
z->neg = u.p.sgn;
if (u.p.exp == 0) {
// value == 0 || value < 1
@@ -806,7 +778,7 @@ typedef uint32_t mp_float_int_t;
const int dig_cnt = (adj_exp + 1 + (DIG_SIZE - 1)) / DIG_SIZE;
const unsigned int rem = adj_exp % DIG_SIZE;
int dig_ind, shft;
mp_float_int_t frc = u.p.frc | ((mp_float_int_t)1 << MP_FLOAT_FRAC_BITS);
mp_float_uint_t frc = u.p.frc | ((mp_float_uint_t)1 << MP_FLOAT_FRAC_BITS);
if (adj_exp < MP_FLOAT_FRAC_BITS) {
shft = 0;
@@ -825,16 +797,16 @@ typedef uint32_t mp_float_int_t;
z->dig[dig_ind++] = (frc << shft) & DIG_MASK;
frc >>= DIG_SIZE - shft;
}
#if DIG_SIZE < (MP_FLOAT_FRAC_BITS + 1)
#if DIG_SIZE < (MP_FLOAT_FRAC_BITS + 1)
while (dig_ind != dig_cnt) {
z->dig[dig_ind++] = frc & DIG_MASK;
frc >>= DIG_SIZE;
}
#else
#else
if (dig_ind != dig_cnt) {
z->dig[dig_ind] = frc;
}
#endif
#endif
}
}
}
@@ -857,7 +829,7 @@ size_t mpz_set_from_str(mpz_t *z, const char *str, size_t len, bool neg, unsigne
z->len = 0;
for (; cur < top; ++cur) { // XXX UTF8 next char
//mp_uint_t v = char_to_numeric(cur#); // XXX UTF8 get char
// mp_uint_t v = char_to_numeric(cur#); // XXX UTF8 get char
mp_uint_t v = *cur;
if ('0' <= v && v <= '9') {
v -= '0';
@@ -948,28 +920,48 @@ int mpz_cmp(const mpz_t *z1, const mpz_t *z2) {
mp_int_t mpz_cmp_sml_int(const mpz_t *z, mp_int_t sml_int) {
mp_int_t cmp;
if (z->neg == 0) {
if (sml_int < 0) return 1;
if (sml_int == 0) {
if (z->len == 0) return 0;
if (sml_int < 0) {
return 1;
}
if (z->len == 0) return -1;
assert(sml_int < (1 << DIG_SIZE));
if (z->len != 1) return 1;
cmp = z->dig[0] - sml_int;
} else {
if (sml_int > 0) return -1;
if (sml_int == 0) {
if (z->len == 0) return 0;
if (z->len == 0) {
return 0;
}
return 1;
}
if (z->len == 0) {
return -1;
}
if (z->len == 0) return 1;
assert(sml_int < (1 << DIG_SIZE));
if (z->len != 1) {
return 1;
}
cmp = z->dig[0] - sml_int;
} else {
if (sml_int > 0) {
return -1;
}
if (sml_int == 0) {
if (z->len == 0) {
return 0;
}
return -1;
}
if (z->len == 0) {
return 1;
}
assert(sml_int > -(1 << DIG_SIZE));
if (z->len != 1) return -1;
if (z->len != 1) {
return -1;
}
cmp = -z->dig[0] - sml_int;
}
if (cmp < 0) return -1;
if (cmp > 0) return 1;
if (cmp < 0) {
return -1;
}
if (cmp > 0) {
return 1;
}
return 0;
}
#endif
@@ -1207,7 +1199,7 @@ void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
} else {
mpz_need_dig(dest, lhs->len + 1);
dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
lhs->neg == rhs->neg, 0 != lhs->neg, 0 != rhs->neg);
lhs->neg == rhs->neg, 0 != lhs->neg, 0 != rhs->neg);
dest->neg = lhs->neg & rhs->neg;
}
@@ -1215,7 +1207,7 @@ void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
(lhs->neg == rhs->neg) ? lhs->neg : 0, lhs->neg, rhs->neg);
(lhs->neg == rhs->neg) ? lhs->neg : 0, lhs->neg, rhs->neg);
dest->neg = lhs->neg & rhs->neg;
#endif
@@ -1241,7 +1233,7 @@ void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
} else {
mpz_need_dig(dest, lhs->len + 1);
dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
0 != lhs->neg, 0 != rhs->neg);
0 != lhs->neg, 0 != rhs->neg);
dest->neg = 1;
}
@@ -1249,7 +1241,7 @@ void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
(lhs->neg || rhs->neg), lhs->neg, rhs->neg);
(lhs->neg || rhs->neg), lhs->neg, rhs->neg);
dest->neg = lhs->neg | rhs->neg;
#endif
@@ -1279,7 +1271,7 @@ void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
} else {
mpz_need_dig(dest, lhs->len + 1);
dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len, 1,
0 == lhs->neg, 0 == rhs->neg);
0 == lhs->neg, 0 == rhs->neg);
dest->neg = 1;
}
@@ -1287,7 +1279,7 @@ void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
(lhs->neg != rhs->neg), 0 == lhs->neg, 0 == rhs->neg);
(lhs->neg != rhs->neg), 0 == lhs->neg, 0 == rhs->neg);
dest->neg = lhs->neg ^ rhs->neg;
#endif
@@ -1376,7 +1368,8 @@ void mpz_pow3_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs, const mpz_t
mpz_t *x = mpz_clone(lhs);
mpz_t *n = mpz_clone(rhs);
mpz_t quo; mpz_init_zero(&quo);
mpz_t quo;
mpz_init_zero(&quo);
while (n->len > 0) {
if ((n->dig[0] & 1) != 0) {
@@ -1419,7 +1412,8 @@ mpz_t *mpz_gcd(const mpz_t *z1, const mpz_t *z2) {
mpz_t *a = mpz_clone(z1);
mpz_t *b = mpz_clone(z2);
mpz_t c; mpz_init_zero(&c);
mpz_t c;
mpz_init_zero(&c);
a->neg = 0;
b->neg = 0;
@@ -1430,7 +1424,9 @@ mpz_t *mpz_gcd(const mpz_t *z1, const mpz_t *z2) {
mpz_deinit(&c);
return b;
}
mpz_t *t = a; a = b; b = t;
mpz_t *t = a;
a = b;
b = t;
}
if (!(b->len >= 2 || (b->len == 1 && b->dig[0] > 1))) { // compute b > 0; could be mpz_cmp_small_int(b, 1) > 0
break;
@@ -1497,7 +1493,8 @@ void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const m
if (lhs->neg != rhs->neg) {
dest_quo->neg = 1;
if (!mpz_is_zero(dest_rem)) {
mpz_t mpzone; mpz_init_from_int(&mpzone, -1);
mpz_t mpzone;
mpz_init_from_int(&mpzone, -1);
mpz_add_inpl(dest_quo, dest_quo, &mpzone);
mpz_add_inpl(dest_rem, dest_rem, rhs);
}
@@ -1512,7 +1509,8 @@ these functions are unused
*/
mpz_t *mpz_div(const mpz_t *lhs, const mpz_t *rhs) {
mpz_t *quo = mpz_zero();
mpz_t rem; mpz_init_zero(&rem);
mpz_t rem;
mpz_init_zero(&rem);
mpz_divmod_inpl(quo, &rem, lhs, rhs);
mpz_deinit(&rem);
return quo;
@@ -1522,7 +1520,8 @@ mpz_t *mpz_div(const mpz_t *lhs, const mpz_t *rhs) {
can have lhs, rhs the same
*/
mpz_t *mpz_mod(const mpz_t *lhs, const mpz_t *rhs) {
mpz_t quo; mpz_init_zero(&quo);
mpz_t quo;
mpz_init_zero(&quo);
mpz_t *rem = mpz_zero();
mpz_divmod_inpl(&quo, rem, lhs, rhs);
mpz_deinit(&quo);
@@ -1551,7 +1550,7 @@ bool mpz_as_int_checked(const mpz_t *i, mp_int_t *value) {
mpz_dig_t *d = i->dig + i->len;
while (d-- > i->dig) {
if (val > (~(WORD_MSBIT_HIGH) >> DIG_SIZE)) {
if (val > (~(MP_OBJ_WORD_MSBIT_HIGH) >> DIG_SIZE)) {
// will overflow
return false;
}
@@ -1576,7 +1575,7 @@ bool mpz_as_uint_checked(const mpz_t *i, mp_uint_t *value) {
mpz_dig_t *d = i->dig + i->len;
while (d-- > i->dig) {
if (val > (~(WORD_MSBIT_HIGH) >> (DIG_SIZE - 1))) {
if (val > (~(MP_OBJ_WORD_MSBIT_HIGH) >> (DIG_SIZE - 1))) {
// will overflow
return false;
}
@@ -1587,7 +1586,6 @@ bool mpz_as_uint_checked(const mpz_t *i, mp_uint_t *value) {
return true;
}
// writes at most len bytes to buf (so buf should be zeroed before calling)
void mpz_as_bytes(const mpz_t *z, bool big_endian, size_t len, byte *buf) {
byte *b = buf;
if (big_endian) {
@@ -1619,6 +1617,15 @@ void mpz_as_bytes(const mpz_t *z, bool big_endian, size_t len, byte *buf) {
}
}
}
// fill remainder of buf with zero/sign extension of the integer
if (big_endian) {
len = b - buf;
} else {
len = buf + len - b;
buf = b;
}
memset(buf, z->neg ? 0xff : 0x00, len);
}
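The hunk above is the behavioural change in mpz_as_bytes: the unused tail of the output buffer is now filled with 0x00 or 0xff by the function itself, so callers no longer have to pre-zero it. A minimal sketch of the effect, assuming a MicroPython source tree on the include path (the demo function and the "-1 into four bytes" case are illustrative, not part of the commit):

    #include <stdio.h>
    #include "py/mpz.h"

    static void demo_mpz_as_bytes(void) {
        mpz_t z;
        mpz_init_from_int(&z, -1);
        byte buf[4];
        // No memset() of buf needed any more: the added code pads the
        // remaining bytes with 0xff (negative value) or 0x00 (positive).
        mpz_as_bytes(&z, false /* little endian */, sizeof(buf), buf);
        for (size_t i = 0; i < sizeof(buf); ++i) {
            printf("%02x ", buf[i]); // expected: ff ff ff ff
        }
        printf("\n");
        mpz_deinit(&z);
    }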
#if MICROPY_PY_BUILTINS_FLOAT
@@ -1659,8 +1666,9 @@ size_t mpz_as_str_inpl(const mpz_t *i, unsigned int base, const char *prefix, ch
char *s = str;
if (ilen == 0) {
if (prefix) {
while (*prefix)
while (*prefix) {
*s++ = *prefix++;
}
}
*s++ = '0';
*s = '\0';

View File

@@ -46,10 +46,10 @@
#ifndef MPZ_DIG_SIZE
#if defined(__x86_64__) || defined(_WIN64)
// 64-bit machine, using 32-bit storage for digits
// 64-bit machine, using 32-bit storage for digits
#define MPZ_DIG_SIZE (32)
#else
// default: 32-bit machine, using 16-bit storage for digits
// default: 32-bit machine, using 16-bit storage for digits
#define MPZ_DIG_SIZE (16)
#endif
#endif
@@ -93,13 +93,13 @@ typedef int8_t mpz_dbl_dig_signed_t;
typedef struct _mpz_t {
size_t neg : 1;
size_t fixed_dig : 1;
size_t alloc : 8 * sizeof(size_t) - 2;
size_t alloc : (8 * sizeof(size_t) - 2);
size_t len;
mpz_dig_t *dig;
} mpz_t;
// convenience macro to declare an mpz with a digit array from the stack, initialised by an integer
#define MPZ_CONST_INT(z, val) mpz_t z; mpz_dig_t z ## _digits[MPZ_NUM_DIG_FOR_INT]; mpz_init_fixed_from_int(&z, z_digits, MPZ_NUM_DIG_FOR_INT, val);
#define MPZ_CONST_INT(z, val) mpz_t z; mpz_dig_t z##_digits[MPZ_NUM_DIG_FOR_INT]; mpz_init_fixed_from_int(&z, z_digits, MPZ_NUM_DIG_FOR_INT, val);
void mpz_init_zero(mpz_t *z);
void mpz_init_from_int(mpz_t *z, mp_int_t val);
@@ -115,8 +115,12 @@ void mpz_set_from_float(mpz_t *z, mp_float_t src);
size_t mpz_set_from_str(mpz_t *z, const char *str, size_t len, bool neg, unsigned int base);
void mpz_set_from_bytes(mpz_t *z, bool big_endian, size_t len, const byte *buf);
static inline bool mpz_is_zero(const mpz_t *z) { return z->len == 0; }
static inline bool mpz_is_neg(const mpz_t *z) { return z->len != 0 && z->neg != 0; }
static inline bool mpz_is_zero(const mpz_t *z) {
return z->len == 0;
}
static inline bool mpz_is_neg(const mpz_t *z) {
return z->len != 0 && z->neg != 0;
}
int mpz_cmp(const mpz_t *lhs, const mpz_t *rhs);
void mpz_abs_inpl(mpz_t *dest, const mpz_t *z);
@@ -134,7 +138,9 @@ void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const mpz_t *rhs);
static inline size_t mpz_max_num_bits(const mpz_t *z) { return z->len * MPZ_DIG_SIZE; }
static inline size_t mpz_max_num_bits(const mpz_t *z) {
return z->len * MPZ_DIG_SIZE;
}
mp_int_t mpz_hash(const mpz_t *z);
bool mpz_as_int_checked(const mpz_t *z, mp_int_t *value);
bool mpz_as_uint_checked(const mpz_t *z, mp_uint_t *value);
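For context, a hedged usage sketch of the fixed-digit initialiser that the reformatted MPZ_CONST_INT macro wraps, assuming py/mpz.h is available (the example function is illustrative):

    #include "py/mpz.h"

    static void example(void) {
        mpz_dig_t digits[MPZ_NUM_DIG_FOR_INT];
        mpz_t ten;
        // Stack-allocated digits; fixed_dig is set by this initialiser, so the
        // value must always fit in the caller-supplied digit array.
        mpz_init_fixed_from_int(&ten, digits, MPZ_NUM_DIG_FOR_INT, 10);
        (void)mpz_is_zero(&ten); // usable with the normal mpz_* API
    }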

View File

@@ -44,15 +44,24 @@
int mp_native_type_from_qstr(qstr qst) {
switch (qst) {
case MP_QSTR_object: return MP_NATIVE_TYPE_OBJ;
case MP_QSTR_bool: return MP_NATIVE_TYPE_BOOL;
case MP_QSTR_int: return MP_NATIVE_TYPE_INT;
case MP_QSTR_uint: return MP_NATIVE_TYPE_UINT;
case MP_QSTR_ptr: return MP_NATIVE_TYPE_PTR;
case MP_QSTR_ptr8: return MP_NATIVE_TYPE_PTR8;
case MP_QSTR_ptr16: return MP_NATIVE_TYPE_PTR16;
case MP_QSTR_ptr32: return MP_NATIVE_TYPE_PTR32;
default: return -1;
case MP_QSTR_object:
return MP_NATIVE_TYPE_OBJ;
case MP_QSTR_bool:
return MP_NATIVE_TYPE_BOOL;
case MP_QSTR_int:
return MP_NATIVE_TYPE_INT;
case MP_QSTR_uint:
return MP_NATIVE_TYPE_UINT;
case MP_QSTR_ptr:
return MP_NATIVE_TYPE_PTR;
case MP_QSTR_ptr8:
return MP_NATIVE_TYPE_PTR8;
case MP_QSTR_ptr16:
return MP_NATIVE_TYPE_PTR16;
case MP_QSTR_ptr32:
return MP_NATIVE_TYPE_PTR32;
default:
return -1;
}
}
@@ -60,10 +69,13 @@ int mp_native_type_from_qstr(qstr qst) {
mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type) {
DEBUG_printf("mp_native_from_obj(%p, " UINT_FMT ")\n", obj, type);
switch (type & 0xf) {
case MP_NATIVE_TYPE_OBJ: return (mp_uint_t)obj;
case MP_NATIVE_TYPE_BOOL: return mp_obj_is_true(obj);
case MP_NATIVE_TYPE_OBJ:
return (mp_uint_t)obj;
case MP_NATIVE_TYPE_BOOL:
return mp_obj_is_true(obj);
case MP_NATIVE_TYPE_INT:
case MP_NATIVE_TYPE_UINT: return mp_obj_get_int_truncated(obj);
case MP_NATIVE_TYPE_UINT:
return mp_obj_get_int_truncated(obj);
default: { // cast obj to a pointer
mp_buffer_info_t bufinfo;
if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_READ)) {
@@ -84,10 +96,14 @@ mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type) {
mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type) {
DEBUG_printf("mp_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type);
switch (type & 0xf) {
case MP_NATIVE_TYPE_OBJ: return (mp_obj_t)val;
case MP_NATIVE_TYPE_BOOL: return mp_obj_new_bool(val);
case MP_NATIVE_TYPE_INT: return mp_obj_new_int(val);
case MP_NATIVE_TYPE_UINT: return mp_obj_new_int_from_uint(val);
case MP_NATIVE_TYPE_OBJ:
return (mp_obj_t)val;
case MP_NATIVE_TYPE_BOOL:
return mp_obj_new_bool(val);
case MP_NATIVE_TYPE_INT:
return mp_obj_new_int(val);
case MP_NATIVE_TYPE_UINT:
return mp_obj_new_int_from_uint(val);
default: // a pointer
// we return just the value of the pointer as an integer
return mp_obj_new_int_from_uint(val);
@@ -102,13 +118,13 @@ mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type) {
mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items) {
(void)n_args;
(void)items;
mp_raise_msg(&mp_type_RuntimeError, "set unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
}
void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) {
(void)self_in;
(void)item;
mp_raise_msg(&mp_type_RuntimeError, "set unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
}
#endif
@@ -117,7 +133,7 @@ mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) {
(void)ostart;
(void)ostop;
(void)ostep;
mp_raise_msg(&mp_type_RuntimeError, "slice unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("slice unsupported"));
}
#endif
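The conversion helpers reformatted earlier in this file are what natively-compiled (@micropython.native / viper) code uses to box and unbox values at the C boundary. A hedged round-trip sketch, assuming the declarations from py/nativeglue.h (the round_trip function name is mine):

    #include "py/nativeglue.h"
    #include "py/obj.h"

    static mp_obj_t round_trip(mp_obj_t obj) {
        // Unbox to a raw machine word as native/viper code would see it...
        mp_uint_t raw = mp_native_from_obj(obj, MP_NATIVE_TYPE_INT);
        // ...and box it back into a Python object.
        return mp_native_to_obj(raw, MP_NATIVE_TYPE_INT);
    }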
@@ -211,53 +227,35 @@ STATIC bool mp_native_yield_from(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *re
return false;
}
#if MICROPY_PY_BUILTINS_FLOAT
STATIC mp_obj_t mp_obj_new_float_from_f(float f) {
return mp_obj_new_float((mp_float_t)f);
}
STATIC mp_obj_t mp_obj_new_float_from_d(double d) {
return mp_obj_new_float((mp_float_t)d);
}
STATIC float mp_obj_get_float_to_f(mp_obj_t o) {
return (float)mp_obj_get_float(o);
}
STATIC double mp_obj_get_float_to_d(mp_obj_t o) {
return (double)mp_obj_get_float(o);
}
#else
#if !MICROPY_PY_BUILTINS_FLOAT
STATIC mp_obj_t mp_obj_new_float_from_f(float f) {
(void)f;
mp_raise_msg(&mp_type_RuntimeError, "float unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
STATIC mp_obj_t mp_obj_new_float_from_d(double d) {
(void)d;
mp_raise_msg(&mp_type_RuntimeError, "float unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
STATIC float mp_obj_get_float_to_f(mp_obj_t o) {
(void)o;
mp_raise_msg(&mp_type_RuntimeError, "float unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
STATIC double mp_obj_get_float_to_d(mp_obj_t o) {
(void)o;
mp_raise_msg(&mp_type_RuntimeError, "float unsupported");
mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
}
#endif
// these must correspond to the respective enum in runtime0.h
// these must correspond to the respective enum in nativeglue.h
const mp_fun_table_t mp_fun_table = {
&mp_const_none_obj,
&mp_const_false_obj,
&mp_const_true_obj,
mp_const_none,
mp_const_false,
mp_const_true,
mp_native_from_obj,
mp_native_to_obj,
mp_native_swap_globals,

View File

@@ -91,7 +91,7 @@ typedef struct _mp_fun_table_t {
mp_const_obj_t const_true;
mp_uint_t (*native_from_obj)(mp_obj_t obj, mp_uint_t type);
mp_obj_t (*native_to_obj)(mp_uint_t val, mp_uint_t type);
mp_obj_dict_t *(*swap_globals)(mp_obj_dict_t *new_globals);
mp_obj_dict_t *(*swap_globals)(mp_obj_dict_t * new_globals);
mp_obj_t (*load_name)(qstr qst);
mp_obj_t (*load_global)(qstr qst);
mp_obj_t (*load_build_class)(void);
@@ -135,7 +135,7 @@ typedef struct _mp_fun_table_t {
mp_int_t (*small_int_floor_divide)(mp_int_t num, mp_int_t denom);
mp_int_t (*small_int_modulo)(mp_int_t dividend, mp_int_t divisor);
bool (*yield_from)(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *ret_value);
void *setjmp;
void *setjmp_;
// Additional entries for dynamic runtime, starts at index 50
void *(*memset_)(void *s, int c, size_t n);
void *(*memmove_)(void *dest, const void *src, size_t n);
@@ -145,10 +145,10 @@ typedef struct _mp_fun_table_t {
#if defined(__GNUC__)
NORETURN // Only certain compilers support no-return attributes in function pointer declarations
#endif
void (*raise_msg)(const mp_obj_type_t *exc_type, const char *msg);
mp_obj_type_t *(*obj_get_type)(mp_const_obj_t o_in);
mp_obj_t (*obj_new_str)(const char* data, size_t len);
mp_obj_t (*obj_new_bytes)(const byte* data, size_t len);
void (*raise_msg)(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg);
const mp_obj_type_t *(*obj_get_type)(mp_const_obj_t o_in);
mp_obj_t (*obj_new_str)(const char *data, size_t len);
mp_obj_t (*obj_new_bytes)(const byte *data, size_t len);
mp_obj_t (*obj_new_bytearray_by_ref)(size_t n, void *items);
mp_obj_t (*obj_new_float_from_f)(float f);
mp_obj_t (*obj_new_float_from_d)(double d);

View File

@@ -30,7 +30,7 @@
// When not using setjmp, nlr_push_tail is called from inline asm so needs special care
#if MICROPY_NLR_X86 && MICROPY_NLR_OS_WINDOWS
// On these 32-bit platforms make sure nlr_push_tail doesn't have a leading underscore
unsigned int nlr_push_tail(nlr_buf_t *nlr) asm("nlr_push_tail");
unsigned int nlr_push_tail(nlr_buf_t *nlr) asm ("nlr_push_tail");
#else
// LTO can't see inside inline asm functions so explicitly mark nlr_push_tail as used
__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);

View File

@@ -39,9 +39,12 @@
#define MICROPY_NLR_NUM_REGS_X64_WIN (10)
#define MICROPY_NLR_NUM_REGS_ARM_THUMB (10)
#define MICROPY_NLR_NUM_REGS_ARM_THUMB_FP (10 + 6)
#define MICROPY_NLR_NUM_REGS_AARCH64 (13)
#define MICROPY_NLR_NUM_REGS_XTENSA (10)
#define MICROPY_NLR_NUM_REGS_XTENSAWIN (17)
// *FORMAT-OFF*
// If MICROPY_NLR_SETJMP is not enabled then auto-detect the machine arch
#if !MICROPY_NLR_SETJMP
// A lot of nlr-related things need different treatment on Windows
@@ -70,6 +73,9 @@
// so only save/restore those as an optimisation.
#define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_ARM_THUMB_FP)
#endif
#elif defined(__aarch64__)
#define MICROPY_NLR_AARCH64 (1)
#define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_AARCH64)
#elif defined(__xtensa__)
#define MICROPY_NLR_XTENSA (1)
#define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_XTENSA)
@@ -83,6 +89,8 @@
#endif
#endif
// *FORMAT-ON*
#if MICROPY_NLR_SETJMP
#include <setjmp.h>
#endif
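If none of the architecture backends listed above match, a port can still fall back to the portable setjmp implementation. A hedged example of forcing that from a port's own configuration header (the file name follows the usual MicroPython convention and is not touched by this commit):

    /* mpconfigport.h (illustrative) */
    #define MICROPY_NLR_SETJMP (1)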

View File

@@ -0,0 +1,83 @@
/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2021 Yonatan Goldschmidt
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/mpstate.h" // needed for NLR defs
#if MICROPY_NLR_AARCH64
// AArch64 callee-saved registers are x19-x29.
// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
// Implemented purely as inline assembly; inside a function, we have to deal with undoing the prologue, restoring
// SP and LR. This way, we don't.
__asm(
#if defined(__APPLE__) && defined(__MACH__)
"_nlr_push: \n"
".global _nlr_push \n"
#else
"nlr_push: \n"
".global nlr_push \n"
#endif
"mov x9, sp \n"
"stp lr, x9, [x0, #16]\n" // 16 == offsetof(nlr_buf_t, regs)
"stp x19, x20, [x0, #32]\n"
"stp x21, x22, [x0, #48]\n"
"stp x23, x24, [x0, #64]\n"
"stp x25, x26, [x0, #80]\n"
"stp x27, x28, [x0, #96]\n"
"str x29, [x0, #112]\n"
#if defined(__APPLE__) && defined(__MACH__)
"b _nlr_push_tail \n" // do the rest in C
#else
"b nlr_push_tail \n" // do the rest in C
#endif
);
NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
MP_STATIC_ASSERT(offsetof(nlr_buf_t, regs) == 16); // asm assumes it
__asm volatile (
"ldr x29, [%0, #112]\n"
"ldp x27, x28, [%0, #96]\n"
"ldp x25, x26, [%0, #80]\n"
"ldp x23, x24, [%0, #64]\n"
"ldp x21, x22, [%0, #48]\n"
"ldp x19, x20, [%0, #32]\n"
"ldp lr, x9, [%0, #16]\n" // 16 == offsetof(nlr_buf_t, regs)
"mov sp, x9 \n"
"mov x0, #1 \n" // non-local return
"ret \n"
:
: "r" (top)
:
);
MP_UNREACHABLE
}
#endif // MICROPY_NLR_AARCH64
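For context, the calling pattern that all of these per-architecture backends implement is unchanged by this commit. A hedged sketch of how the core uses nlr_push/nlr_pop/nlr_jump (the example function is illustrative):

    #include "py/nlr.h"
    #include "py/runtime.h"

    static void call_and_catch(mp_obj_t callable) {
        nlr_buf_t nlr;
        if (nlr_push(&nlr) == 0) {
            // First return: run code that may raise a Python exception.
            mp_call_function_0(callable);
            nlr_pop(); // nothing was raised, unwind the NLR frame
        } else {
            // Second return, via nlr_jump(): the raised object is in ret_val.
            mp_obj_t exc = MP_OBJ_FROM_PTR(nlr.ret_val);
            (void)exc; // handle it here, or re-raise with nlr_raise(exc)
        }
    }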

View File

@@ -34,44 +34,44 @@
unsigned int nlr_push(nlr_buf_t *nlr) {
__asm__ volatile(
"li 4, 0x4eed ; " // Store canary
"std 4, 0x00(%0) ;"
"std 0, 0x08(%0) ;"
"std 1, 0x10(%0) ;"
"std 2, 0x18(%0) ;"
"std 14, 0x20(%0) ;"
"std 15, 0x28(%0) ;"
"std 16, 0x30(%0) ;"
"std 17, 0x38(%0) ;"
"std 18, 0x40(%0) ;"
"std 19, 0x48(%0) ;"
"std 20, 0x50(%0) ;"
"std 21, 0x58(%0) ;"
"std 22, 0x60(%0) ;"
"std 23, 0x68(%0) ;"
"std 24, 0x70(%0) ;"
"std 25, 0x78(%0) ;"
"std 26, 0x80(%0) ;"
"std 27, 0x88(%0) ;"
"std 28, 0x90(%0) ;"
"std 29, 0x98(%0) ;"
"std 30, 0xA0(%0) ;"
"std 31, 0xA8(%0) ;"
__asm__ volatile (
"li 4, 0x4eed ; " // Store canary
"std 4, 0x00(%0) ;"
"std 0, 0x08(%0) ;"
"std 1, 0x10(%0) ;"
"std 2, 0x18(%0) ;"
"std 14, 0x20(%0) ;"
"std 15, 0x28(%0) ;"
"std 16, 0x30(%0) ;"
"std 17, 0x38(%0) ;"
"std 18, 0x40(%0) ;"
"std 19, 0x48(%0) ;"
"std 20, 0x50(%0) ;"
"std 21, 0x58(%0) ;"
"std 22, 0x60(%0) ;"
"std 23, 0x68(%0) ;"
"std 24, 0x70(%0) ;"
"std 25, 0x78(%0) ;"
"std 26, 0x80(%0) ;"
"std 27, 0x88(%0) ;"
"std 28, 0x90(%0) ;"
"std 29, 0x98(%0) ;"
"std 30, 0xA0(%0) ;"
"std 31, 0xA8(%0) ;"
"mfcr 4 ; "
"std 4, 0xB0(%0) ;"
"mflr 4 ;"
"std 4, 0xB8(%0) ;"
"li 4, nlr_push_tail@l ;"
"oris 4, 4, nlr_push_tail@h ;"
"mtctr 4 ;"
"mr 3, %1 ; "
"bctr ;"
:
: "r"(&nlr->regs), "r"(nlr)
:
);
"mfcr 4 ; "
"std 4, 0xB0(%0) ;"
"mflr 4 ;"
"std 4, 0xB8(%0) ;"
"li 4, nlr_push_tail@l ;"
"oris 4, 4, nlr_push_tail@h ;"
"mtctr 4 ;"
"mr 3, %1 ; "
"bctr ;"
:
: "r" (&nlr->regs), "r" (nlr)
:
);
return 0;
}
@@ -79,41 +79,41 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm__ volatile(
"ld 3, 0x0(%0) ;"
"cmpdi 3, 0x4eed ; " // Check canary
"bne . ; "
"ld 0, 0x08(%0) ;"
"ld 1, 0x10(%0) ;"
"ld 2, 0x18(%0) ;"
"ld 14, 0x20(%0) ;"
"ld 15, 0x28(%0) ;"
"ld 16, 0x30(%0) ;"
"ld 17, 0x38(%0) ;"
"ld 18, 0x40(%0) ;"
"ld 19, 0x48(%0) ;"
"ld 20, 0x50(%0) ;"
"ld 21, 0x58(%0) ;"
"ld 22, 0x60(%0) ;"
"ld 23, 0x68(%0) ;"
"ld 24, 0x70(%0) ;"
"ld 25, 0x78(%0) ;"
"ld 26, 0x80(%0) ;"
"ld 27, 0x88(%0) ;"
"ld 28, 0x90(%0) ;"
"ld 29, 0x98(%0) ;"
"ld 30, 0xA0(%0) ;"
"ld 31, 0xA8(%0) ;"
"ld 3, 0xB0(%0) ;"
"mtcr 3 ;"
"ld 3, 0xB8(%0) ;"
"mtlr 3 ; "
"li 3, 1;"
"blr ;"
:
: "r"(&top->regs)
:
);
__asm__ volatile (
"ld 3, 0x0(%0) ;"
"cmpdi 3, 0x4eed ; " // Check canary
"bne . ; "
"ld 0, 0x08(%0) ;"
"ld 1, 0x10(%0) ;"
"ld 2, 0x18(%0) ;"
"ld 14, 0x20(%0) ;"
"ld 15, 0x28(%0) ;"
"ld 16, 0x30(%0) ;"
"ld 17, 0x38(%0) ;"
"ld 18, 0x40(%0) ;"
"ld 19, 0x48(%0) ;"
"ld 20, 0x50(%0) ;"
"ld 21, 0x58(%0) ;"
"ld 22, 0x60(%0) ;"
"ld 23, 0x68(%0) ;"
"ld 24, 0x70(%0) ;"
"ld 25, 0x78(%0) ;"
"ld 26, 0x80(%0) ;"
"ld 27, 0x88(%0) ;"
"ld 28, 0x90(%0) ;"
"ld 29, 0x98(%0) ;"
"ld 30, 0xA0(%0) ;"
"ld 31, 0xA8(%0) ;"
"ld 3, 0xB0(%0) ;"
"mtcr 3 ;"
"ld 3, 0xB8(%0) ;"
"mtlr 3 ; "
"li 3, 1;"
"blr ;"
:
: "r" (&top->regs)
:
);
MP_UNREACHABLE;
}

View File

@@ -39,51 +39,51 @@
__attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
__asm volatile (
"str r4, [r0, #12] \n" // store r4 into nlr_buf
"str r5, [r0, #16] \n" // store r5 into nlr_buf
"str r6, [r0, #20] \n" // store r6 into nlr_buf
"str r7, [r0, #24] \n" // store r7 into nlr_buf
"str r4, [r0, #12] \n" // store r4 into nlr_buf
"str r5, [r0, #16] \n" // store r5 into nlr_buf
"str r6, [r0, #20] \n" // store r6 into nlr_buf
"str r7, [r0, #24] \n" // store r7 into nlr_buf
#if !defined(__thumb2__)
"mov r1, r8 \n"
"str r1, [r0, #28] \n" // store r8 into nlr_buf
"mov r1, r9 \n"
"str r1, [r0, #32] \n" // store r9 into nlr_buf
"mov r1, r10 \n"
"str r1, [r0, #36] \n" // store r10 into nlr_buf
"mov r1, r11 \n"
"str r1, [r0, #40] \n" // store r11 into nlr_buf
"mov r1, r13 \n"
"str r1, [r0, #44] \n" // store r13=sp into nlr_buf
"mov r1, lr \n"
"str r1, [r0, #8] \n" // store lr into nlr_buf
#else
"str r8, [r0, #28] \n" // store r8 into nlr_buf
"str r9, [r0, #32] \n" // store r9 into nlr_buf
"str r10, [r0, #36] \n" // store r10 into nlr_buf
"str r11, [r0, #40] \n" // store r11 into nlr_buf
"str r13, [r0, #44] \n" // store r13=sp into nlr_buf
#if MICROPY_NLR_NUM_REGS == 16
"vstr d8, [r0, #48] \n" // store s16-s17 into nlr_buf
"vstr d9, [r0, #56] \n" // store s18-s19 into nlr_buf
"vstr d10, [r0, #64] \n" // store s20-s21 into nlr_buf
#endif
"str lr, [r0, #8] \n" // store lr into nlr_buf
#endif
#if !defined(__thumb2__)
"mov r1, r8 \n"
"str r1, [r0, #28] \n" // store r8 into nlr_buf
"mov r1, r9 \n"
"str r1, [r0, #32] \n" // store r9 into nlr_buf
"mov r1, r10 \n"
"str r1, [r0, #36] \n" // store r10 into nlr_buf
"mov r1, r11 \n"
"str r1, [r0, #40] \n" // store r11 into nlr_buf
"mov r1, r13 \n"
"str r1, [r0, #44] \n" // store r13=sp into nlr_buf
"mov r1, lr \n"
"str r1, [r0, #8] \n" // store lr into nlr_buf
#else
"str r8, [r0, #28] \n" // store r8 into nlr_buf
"str r9, [r0, #32] \n" // store r9 into nlr_buf
"str r10, [r0, #36] \n" // store r10 into nlr_buf
"str r11, [r0, #40] \n" // store r11 into nlr_buf
"str r13, [r0, #44] \n" // store r13=sp into nlr_buf
#if MICROPY_NLR_NUM_REGS == 16
"vstr d8, [r0, #48] \n" // store s16-s17 into nlr_buf
"vstr d9, [r0, #56] \n" // store s18-s19 into nlr_buf
"vstr d10, [r0, #64] \n" // store s20-s21 into nlr_buf
#endif
"str lr, [r0, #8] \n" // store lr into nlr_buf
#endif
#if !defined(__thumb2__)
"ldr r1, nlr_push_tail_var \n"
"bx r1 \n" // do the rest in C
".align 2 \n"
"nlr_push_tail_var: .word nlr_push_tail \n"
#else
#if defined(__APPLE__) || defined(__MACH__)
"b _nlr_push_tail \n" // do the rest in C
#else
"b nlr_push_tail \n" // do the rest in C
#endif
#endif
);
#if !defined(__thumb2__)
"ldr r1, nlr_push_tail_var \n"
"bx r1 \n" // do the rest in C
".align 2 \n"
"nlr_push_tail_var: .word nlr_push_tail \n"
#else
#if defined(__APPLE__) || defined(__MACH__)
"b _nlr_push_tail \n" // do the rest in C
#else
"b nlr_push_tail \n" // do the rest in C
#endif
#endif
);
#if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))
// Older versions of gcc give an error when naked functions don't return a value
@@ -96,44 +96,44 @@ NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"mov r0, %0 \n" // r0 points to nlr_buf
"ldr r4, [r0, #12] \n" // load r4 from nlr_buf
"ldr r5, [r0, #16] \n" // load r5 from nlr_buf
"ldr r6, [r0, #20] \n" // load r6 from nlr_buf
"ldr r7, [r0, #24] \n" // load r7 from nlr_buf
"mov r0, %0 \n" // r0 points to nlr_buf
"ldr r4, [r0, #12] \n" // load r4 from nlr_buf
"ldr r5, [r0, #16] \n" // load r5 from nlr_buf
"ldr r6, [r0, #20] \n" // load r6 from nlr_buf
"ldr r7, [r0, #24] \n" // load r7 from nlr_buf
#if !defined(__thumb2__)
"ldr r1, [r0, #28] \n" // load r8 from nlr_buf
"mov r8, r1 \n"
"ldr r1, [r0, #32] \n" // load r9 from nlr_buf
"mov r9, r1 \n"
"ldr r1, [r0, #36] \n" // load r10 from nlr_buf
"mov r10, r1 \n"
"ldr r1, [r0, #40] \n" // load r11 from nlr_buf
"mov r11, r1 \n"
"ldr r1, [r0, #44] \n" // load r13=sp from nlr_buf
"mov r13, r1 \n"
"ldr r1, [r0, #8] \n" // load lr from nlr_buf
"mov lr, r1 \n"
#else
"ldr r8, [r0, #28] \n" // load r8 from nlr_buf
"ldr r9, [r0, #32] \n" // load r9 from nlr_buf
"ldr r10, [r0, #36] \n" // load r10 from nlr_buf
"ldr r11, [r0, #40] \n" // load r11 from nlr_buf
"ldr r13, [r0, #44] \n" // load r13=sp from nlr_buf
#if MICROPY_NLR_NUM_REGS == 16
"vldr d8, [r0, #48] \n" // load s16-s17 from nlr_buf
"vldr d9, [r0, #56] \n" // load s18-s19 from nlr_buf
"vldr d10, [r0, #64] \n" // load s20-s21 from nlr_buf
#endif
"ldr lr, [r0, #8] \n" // load lr from nlr_buf
#endif
"movs r0, #1 \n" // return 1, non-local return
"bx lr \n" // return
: // output operands
: "r"(top) // input operands
: // clobbered registers
);
#if !defined(__thumb2__)
"ldr r1, [r0, #28] \n" // load r8 from nlr_buf
"mov r8, r1 \n"
"ldr r1, [r0, #32] \n" // load r9 from nlr_buf
"mov r9, r1 \n"
"ldr r1, [r0, #36] \n" // load r10 from nlr_buf
"mov r10, r1 \n"
"ldr r1, [r0, #40] \n" // load r11 from nlr_buf
"mov r11, r1 \n"
"ldr r1, [r0, #44] \n" // load r13=sp from nlr_buf
"mov r13, r1 \n"
"ldr r1, [r0, #8] \n" // load lr from nlr_buf
"mov lr, r1 \n"
#else
"ldr r8, [r0, #28] \n" // load r8 from nlr_buf
"ldr r9, [r0, #32] \n" // load r9 from nlr_buf
"ldr r10, [r0, #36] \n" // load r10 from nlr_buf
"ldr r11, [r0, #40] \n" // load r11 from nlr_buf
"ldr r13, [r0, #44] \n" // load r13=sp from nlr_buf
#if MICROPY_NLR_NUM_REGS == 16
"vldr d8, [r0, #48] \n" // load s16-s17 from nlr_buf
"vldr d9, [r0, #56] \n" // load s18-s19 from nlr_buf
"vldr d10, [r0, #64] \n" // load s20-s21 from nlr_buf
#endif
"ldr lr, [r0, #8] \n" // load lr from nlr_buf
#endif
"movs r0, #1 \n" // return 1, non-local return
"bx lr \n" // return
: // output operands
: "r" (top) // input operands
: // clobbered registers
);
MP_UNREACHABLE
}

View File

@@ -41,41 +41,41 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
#if MICROPY_NLR_OS_WINDOWS
__asm volatile (
"movq (%rsp), %rax \n" // load return %rip
"movq %rax, 16(%rcx) \n" // store %rip into nlr_buf
"movq %rbp, 24(%rcx) \n" // store %rbp into nlr_buf
"movq %rsp, 32(%rcx) \n" // store %rsp into nlr_buf
"movq %rbx, 40(%rcx) \n" // store %rbx into nlr_buf
"movq %r12, 48(%rcx) \n" // store %r12 into nlr_buf
"movq %r13, 56(%rcx) \n" // store %r13 into nlr_buf
"movq %r14, 64(%rcx) \n" // store %r14 into nlr_buf
"movq %r15, 72(%rcx) \n" // store %r15 into nlr_buf
"movq %rdi, 80(%rcx) \n" // store %rdr into nlr_buf
"movq %rsi, 88(%rcx) \n" // store %rsi into nlr_buf
"jmp nlr_push_tail \n" // do the rest in C
);
"movq (%rsp), %rax \n" // load return %rip
"movq %rax, 16(%rcx) \n" // store %rip into nlr_buf
"movq %rbp, 24(%rcx) \n" // store %rbp into nlr_buf
"movq %rsp, 32(%rcx) \n" // store %rsp into nlr_buf
"movq %rbx, 40(%rcx) \n" // store %rbx into nlr_buf
"movq %r12, 48(%rcx) \n" // store %r12 into nlr_buf
"movq %r13, 56(%rcx) \n" // store %r13 into nlr_buf
"movq %r14, 64(%rcx) \n" // store %r14 into nlr_buf
"movq %r15, 72(%rcx) \n" // store %r15 into nlr_buf
"movq %rdi, 80(%rcx) \n" // store %rdr into nlr_buf
"movq %rsi, 88(%rcx) \n" // store %rsi into nlr_buf
"jmp nlr_push_tail \n" // do the rest in C
);
#else
__asm volatile (
#if defined(__APPLE__) || defined(__MACH__)
"pop %rbp \n" // undo function's prelude
#endif
"movq (%rsp), %rax \n" // load return %rip
"movq %rax, 16(%rdi) \n" // store %rip into nlr_buf
"movq %rbp, 24(%rdi) \n" // store %rbp into nlr_buf
"movq %rsp, 32(%rdi) \n" // store %rsp into nlr_buf
"movq %rbx, 40(%rdi) \n" // store %rbx into nlr_buf
"movq %r12, 48(%rdi) \n" // store %r12 into nlr_buf
"movq %r13, 56(%rdi) \n" // store %r13 into nlr_buf
"movq %r14, 64(%rdi) \n" // store %r14 into nlr_buf
"movq %r15, 72(%rdi) \n" // store %r15 into nlr_buf
#if defined(__APPLE__) || defined(__MACH__)
"jmp _nlr_push_tail \n" // do the rest in C
#else
"jmp nlr_push_tail \n" // do the rest in C
#endif
);
#if defined(__APPLE__) && defined(__MACH__)
"pop %rbp \n" // undo function's prelude
#endif
"movq (%rsp), %rax \n" // load return %rip
"movq %rax, 16(%rdi) \n" // store %rip into nlr_buf
"movq %rbp, 24(%rdi) \n" // store %rbp into nlr_buf
"movq %rsp, 32(%rdi) \n" // store %rsp into nlr_buf
"movq %rbx, 40(%rdi) \n" // store %rbx into nlr_buf
"movq %r12, 48(%rdi) \n" // store %r12 into nlr_buf
"movq %r13, 56(%rdi) \n" // store %r13 into nlr_buf
"movq %r14, 64(%rdi) \n" // store %r14 into nlr_buf
"movq %r15, 72(%rdi) \n" // store %r15 into nlr_buf
#if defined(__APPLE__) && defined(__MACH__)
"jmp _nlr_push_tail \n" // do the rest in C
#else
"jmp nlr_push_tail \n" // do the rest in C
#endif
);
#endif
@@ -86,27 +86,27 @@ NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"movq %0, %%rcx \n" // %rcx points to nlr_buf
#if MICROPY_NLR_OS_WINDOWS
"movq 88(%%rcx), %%rsi \n" // load saved %rsi
"movq 80(%%rcx), %%rdi \n" // load saved %rdr
#endif
"movq 72(%%rcx), %%r15 \n" // load saved %r15
"movq 64(%%rcx), %%r14 \n" // load saved %r14
"movq 56(%%rcx), %%r13 \n" // load saved %r13
"movq 48(%%rcx), %%r12 \n" // load saved %r12
"movq 40(%%rcx), %%rbx \n" // load saved %rbx
"movq 32(%%rcx), %%rsp \n" // load saved %rsp
"movq 24(%%rcx), %%rbp \n" // load saved %rbp
"movq 16(%%rcx), %%rax \n" // load saved %rip
"movq %%rax, (%%rsp) \n" // store saved %rip to stack
"xorq %%rax, %%rax \n" // clear return register
"inc %%al \n" // increase to make 1, non-local return
"ret \n" // return
: // output operands
: "r"(top) // input operands
: // clobbered registers
);
"movq %0, %%rcx \n" // %rcx points to nlr_buf
#if MICROPY_NLR_OS_WINDOWS
"movq 88(%%rcx), %%rsi \n" // load saved %rsi
"movq 80(%%rcx), %%rdi \n" // load saved %rdi
#endif
"movq 72(%%rcx), %%r15 \n" // load saved %r15
"movq 64(%%rcx), %%r14 \n" // load saved %r14
"movq 56(%%rcx), %%r13 \n" // load saved %r13
"movq 48(%%rcx), %%r12 \n" // load saved %r12
"movq 40(%%rcx), %%rbx \n" // load saved %rbx
"movq 32(%%rcx), %%rsp \n" // load saved %rsp
"movq 24(%%rcx), %%rbp \n" // load saved %rbp
"movq 16(%%rcx), %%rax \n" // load saved %rip
"movq %%rax, (%%rsp) \n" // store saved %rip to stack
"xorq %%rax, %%rax \n" // clear return register
"inc %%al \n" // increase to make 1, non-local return
"ret \n" // return
: // output operands
: "r" (top) // input operands
: // clobbered registers
);
MP_UNREACHABLE
}

View File

@@ -34,7 +34,7 @@
// ebx, esi, edi, ebp, esp, eip
#if MICROPY_NLR_OS_WINDOWS
unsigned int nlr_push_tail(nlr_buf_t *nlr) asm("nlr_push_tail");
unsigned int nlr_push_tail(nlr_buf_t *nlr) asm ("nlr_push_tail");
#else
__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
#endif
@@ -56,24 +56,22 @@ __attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
__attribute__((naked))
#endif
unsigned int nlr_push(nlr_buf_t *nlr) {
#if !USE_NAKED
(void)nlr;
#endif
__asm volatile (
#if UNDO_PRELUDE
"pop %ebp \n" // undo function's prelude
#endif
"mov 4(%esp), %edx \n" // load nlr_buf
"mov (%esp), %eax \n" // load return %eip
"mov %eax, 8(%edx) \n" // store %eip into nlr_buf
"mov %ebp, 12(%edx) \n" // store %ebp into nlr_buf
"mov %esp, 16(%edx) \n" // store %esp into nlr_buf
"mov %ebx, 20(%edx) \n" // store %ebx into nlr_buf
"mov %edi, 24(%edx) \n" // store %edi into nlr_buf
"mov %esi, 28(%edx) \n" // store %esi into nlr_buf
"jmp nlr_push_tail \n" // do the rest in C
);
#if UNDO_PRELUDE
"pop %ebp \n" // undo function's prelude
#endif
"mov 4(%esp), %edx \n" // load nlr_buf
"mov (%esp), %eax \n" // load return %eip
"mov %eax, 8(%edx) \n" // store %eip into nlr_buf
"mov %ebp, 12(%edx) \n" // store %ebp into nlr_buf
"mov %esp, 16(%edx) \n" // store %esp into nlr_buf
"mov %ebx, 20(%edx) \n" // store %ebx into nlr_buf
"mov %edi, 24(%edx) \n" // store %edi into nlr_buf
"mov %esi, 28(%edx) \n" // store %esi into nlr_buf
"jmp nlr_push_tail \n" // do the rest in C
);
#if !USE_NAKED
return 0; // needed to silence compiler warning
@@ -84,21 +82,21 @@ NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"mov %0, %%edx \n" // %edx points to nlr_buf
"mov 28(%%edx), %%esi \n" // load saved %esi
"mov 24(%%edx), %%edi \n" // load saved %edi
"mov 20(%%edx), %%ebx \n" // load saved %ebx
"mov 16(%%edx), %%esp \n" // load saved %esp
"mov 12(%%edx), %%ebp \n" // load saved %ebp
"mov 8(%%edx), %%eax \n" // load saved %eip
"mov %%eax, (%%esp) \n" // store saved %eip to stack
"xor %%eax, %%eax \n" // clear return register
"inc %%al \n" // increase to make 1, non-local return
"ret \n" // return
: // output operands
: "r"(top) // input operands
: // clobbered registers
);
"mov %0, %%edx \n" // %edx points to nlr_buf
"mov 28(%%edx), %%esi \n" // load saved %esi
"mov 24(%%edx), %%edi \n" // load saved %edi
"mov 20(%%edx), %%ebx \n" // load saved %ebx
"mov 16(%%edx), %%esp \n" // load saved %esp
"mov 12(%%edx), %%ebp \n" // load saved %ebp
"mov 8(%%edx), %%eax \n" // load saved %eip
"mov %%eax, (%%esp) \n" // store saved %eip to stack
"xor %%eax, %%eax \n" // clear return register
"inc %%al \n" // increase to make 1, non-local return
"ret \n" // return
: // output operands
: "r" (top) // input operands
: // clobbered registers
);
MP_UNREACHABLE
}

View File

@@ -39,18 +39,18 @@
unsigned int nlr_push(nlr_buf_t *nlr) {
__asm volatile (
"s32i.n a0, a2, 8 \n" // save regs...
"s32i.n a1, a2, 12 \n"
"s32i.n a8, a2, 16 \n"
"s32i.n a9, a2, 20 \n"
"s32i.n a10, a2, 24 \n"
"s32i.n a11, a2, 28 \n"
"s32i.n a12, a2, 32 \n"
"s32i.n a13, a2, 36 \n"
"s32i.n a14, a2, 40 \n"
"s32i.n a15, a2, 44 \n"
"j nlr_push_tail \n" // do the rest in C
);
"s32i.n a0, a2, 8 \n" // save regs...
"s32i.n a1, a2, 12 \n"
"s32i.n a8, a2, 16 \n"
"s32i.n a9, a2, 20 \n"
"s32i.n a10, a2, 24 \n"
"s32i.n a11, a2, 28 \n"
"s32i.n a12, a2, 32 \n"
"s32i.n a13, a2, 36 \n"
"s32i.n a14, a2, 40 \n"
"s32i.n a15, a2, 44 \n"
"j nlr_push_tail \n" // do the rest in C
);
return 0; // needed to silence compiler warning
}
@@ -59,23 +59,23 @@ NORETURN void nlr_jump(void *val) {
MP_NLR_JUMP_HEAD(val, top)
__asm volatile (
"mov.n a2, %0 \n" // a2 points to nlr_buf
"l32i.n a0, a2, 8 \n" // restore regs...
"l32i.n a1, a2, 12 \n"
"l32i.n a8, a2, 16 \n"
"l32i.n a9, a2, 20 \n"
"l32i.n a10, a2, 24 \n"
"l32i.n a11, a2, 28 \n"
"l32i.n a12, a2, 32 \n"
"l32i.n a13, a2, 36 \n"
"l32i.n a14, a2, 40 \n"
"l32i.n a15, a2, 44 \n"
"movi.n a2, 1 \n" // return 1, non-local return
"ret.n \n" // return
: // output operands
: "r"(top) // input operands
: // clobbered registers
);
"mov.n a2, %0 \n" // a2 points to nlr_buf
"l32i.n a0, a2, 8 \n" // restore regs...
"l32i.n a1, a2, 12 \n"
"l32i.n a8, a2, 16 \n"
"l32i.n a9, a2, 20 \n"
"l32i.n a10, a2, 24 \n"
"l32i.n a11, a2, 28 \n"
"l32i.n a12, a2, 32 \n"
"l32i.n a13, a2, 36 \n"
"l32i.n a14, a2, 40 \n"
"l32i.n a15, a2, 44 \n"
"movi.n a2, 1 \n" // return 1, non-local return
"ret.n \n" // return
: // output operands
: "r" (top) // input operands
: // clobbered registers
);
MP_UNREACHABLE
}

View File

@@ -37,19 +37,62 @@
#include "py/stackctrl.h"
#include "py/stream.h" // for mp_obj_print
mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in) {
const mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in) {
#if MICROPY_OBJ_IMMEDIATE_OBJS && MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A
if (mp_obj_is_obj(o_in)) {
const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
return o->type;
} else {
static const mp_obj_type_t *const types[] = {
NULL, &mp_type_int, &mp_type_str, &mp_type_int,
NULL, &mp_type_int, &mp_type_NoneType, &mp_type_int,
NULL, &mp_type_int, &mp_type_str, &mp_type_int,
NULL, &mp_type_int, &mp_type_bool, &mp_type_int,
};
return types[(uintptr_t)o_in & 0xf];
}
#elif MICROPY_OBJ_IMMEDIATE_OBJS && MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
if (mp_obj_is_small_int(o_in)) {
return (mp_obj_type_t*)&mp_type_int;
} else if (mp_obj_is_qstr(o_in)) {
return (mp_obj_type_t*)&mp_type_str;
return &mp_type_int;
} else if (mp_obj_is_obj(o_in)) {
const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
return o->type;
#if MICROPY_PY_BUILTINS_FLOAT
} else if ((((mp_uint_t)(o_in)) & 0xff800007) != 0x00000006) {
return &mp_type_float;
#endif
} else {
static const mp_obj_type_t *const types[] = {
&mp_type_str, &mp_type_NoneType, &mp_type_str, &mp_type_bool,
};
return types[((uintptr_t)o_in >> 3) & 3];
}
#else
if (mp_obj_is_small_int(o_in)) {
return &mp_type_int;
} else if (mp_obj_is_qstr(o_in)) {
return &mp_type_str;
#if MICROPY_PY_BUILTINS_FLOAT && ( \
MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D)
} else if (mp_obj_is_float(o_in)) {
return (mp_obj_type_t*)&mp_type_float;
return &mp_type_float;
#endif
#if MICROPY_OBJ_IMMEDIATE_OBJS
} else if (mp_obj_is_immediate_obj(o_in)) {
static const mp_obj_type_t *const types[2] = {&mp_type_NoneType, &mp_type_bool};
return types[MP_OBJ_IMMEDIATE_OBJ_VALUE(o_in) & 1];
#endif
} else {
const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
return (mp_obj_type_t*)o->type;
return o->type;
}
#endif
}
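The lookup tables above rely on tag bits packed into the object word itself. A small self-contained worked example of the representation-A small-integer encoding they assume, mirroring MP_OBJ_NEW_SMALL_INT / MP_OBJ_SMALL_INT_VALUE (the demo itself is mine):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        intptr_t value = 5;
        uintptr_t word = ((uintptr_t)value << 1) | 1; // MP_OBJ_NEW_SMALL_INT
        assert((word & 1) != 0);                      // mp_obj_is_small_int
        assert(((intptr_t)word >> 1) == value);       // MP_OBJ_SMALL_INT_VALUE
        return 0;
    }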
const char *mp_obj_get_type_str(mp_const_obj_t o_in) {
@@ -59,15 +102,15 @@ const char *mp_obj_get_type_str(mp_const_obj_t o_in) {
void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
// There can be data structures nested too deep, or just recursive
MP_STACK_CHECK();
#ifndef NDEBUG
#ifndef NDEBUG
if (o_in == MP_OBJ_NULL) {
mp_print_str(print, "(nil)");
return;
}
#endif
mp_obj_type_t *type = mp_obj_get_type(o_in);
#endif
const mp_obj_type_t *type = mp_obj_get_type(o_in);
if (type->print != NULL) {
type->print((mp_print_t*)print, o_in, kind);
type->print((mp_print_t *)print, o_in, kind);
} else {
mp_printf(print, "<%q>", type->name);
}
@@ -86,11 +129,11 @@ void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc) {
assert(n % 3 == 0);
mp_print_str(print, "Traceback (most recent call last):\n");
for (int i = n - 3; i >= 0; i -= 3) {
#if MICROPY_ENABLE_SOURCE_LINE
#if MICROPY_ENABLE_SOURCE_LINE
mp_printf(print, " File \"%q\", line %d", values[i], (int)values[i + 1]);
#else
#else
mp_printf(print, " File \"%q\"", values[i]);
#endif
#endif
// the block name can be NULL if it's unknown
qstr block = values[i + 2];
if (block == MP_QSTRnull) {
@@ -119,7 +162,7 @@ bool mp_obj_is_true(mp_obj_t arg) {
return 1;
}
} else {
mp_obj_type_t *type = mp_obj_get_type(arg);
const mp_obj_type_t *type = mp_obj_get_type(arg);
if (type->unary_op != NULL) {
mp_obj_t result = type->unary_op(MP_UNARY_OP_BOOL, arg);
if (result != MP_OBJ_NULL) {
@@ -139,14 +182,14 @@ bool mp_obj_is_true(mp_obj_t arg) {
}
bool mp_obj_is_callable(mp_obj_t o_in) {
mp_call_fun_t call = mp_obj_get_type(o_in)->call;
const mp_call_fun_t call = mp_obj_get_type(o_in)->call;
if (call != mp_obj_instance_call) {
return call != NULL;
}
return mp_obj_instance_is_callable(o_in);
}
// This function implements the '==' operator (and so the inverse of '!=').
// This function implements the '==' and '!=' operators.
//
// From the Python language reference:
// (https://docs.python.org/3/reference/expressions.html#not-in)
@@ -159,67 +202,89 @@ bool mp_obj_is_callable(mp_obj_t o_in) {
// Furthermore, from the v3.4.2 code for object.c: "Practical amendments: If rich
// comparison returns NotImplemented, == and != are decided by comparing the object
// pointer."
bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) {
// Float (and complex) NaN is never equal to anything, not even itself,
// so we must have a special check here to cover those cases.
if (o1 == o2
#if MICROPY_PY_BUILTINS_FLOAT
&& !mp_obj_is_float(o1)
#endif
#if MICROPY_PY_BUILTINS_COMPLEX
&& !mp_obj_is_type(o1, &mp_type_complex)
#endif
) {
return true;
}
if (o1 == mp_const_none || o2 == mp_const_none) {
return false;
}
mp_obj_t mp_obj_equal_not_equal(mp_binary_op_t op, mp_obj_t o1, mp_obj_t o2) {
mp_obj_t local_true = (op == MP_BINARY_OP_NOT_EQUAL) ? mp_const_false : mp_const_true;
mp_obj_t local_false = (op == MP_BINARY_OP_NOT_EQUAL) ? mp_const_true : mp_const_false;
int pass_number = 0;
// fast path for small ints
if (mp_obj_is_small_int(o1)) {
if (mp_obj_is_small_int(o2)) {
// both SMALL_INT, and not equal if we get here
return false;
} else {
mp_obj_t temp = o2; o2 = o1; o1 = temp;
// o2 is now the SMALL_INT, o1 is not
// fall through to generic op
}
// Shortcut for very common cases
if (o1 == o2 &&
(mp_obj_is_small_int(o1) || !(mp_obj_get_type(o1)->flags & MP_TYPE_FLAG_EQ_NOT_REFLEXIVE))) {
return local_true;
}
// fast path for strings
if (mp_obj_is_str(o1)) {
if (mp_obj_is_str(o2)) {
// both strings, use special function
return mp_obj_str_equal(o1, o2);
} else {
// a string is never equal to anything else
goto str_cmp_err;
}
} else if (mp_obj_is_str(o2)) {
// o1 is not a string (else caught above), so the objects are not equal
str_cmp_err:
return mp_obj_str_equal(o1, o2) ? local_true : local_false;
#if MICROPY_PY_STR_BYTES_CMP_WARN
if (mp_obj_is_type(o1, &mp_type_bytes) || mp_obj_is_type(o2, &mp_type_bytes)) {
} else if (mp_obj_is_type(o2, &mp_type_bytes)) {
str_bytes_cmp:
mp_warning(MP_WARN_CAT(BytesWarning), "Comparison between bytes and str");
}
return local_false;
#endif
return false;
} else {
goto skip_one_pass;
}
#if MICROPY_PY_STR_BYTES_CMP_WARN
} else if (mp_obj_is_str(o2) && mp_obj_is_type(o1, &mp_type_bytes)) {
// o1 is not a string (else caught above), so the objects are not equal
goto str_bytes_cmp;
#endif
}
// fast path for small ints
if (mp_obj_is_small_int(o1)) {
if (mp_obj_is_small_int(o2)) {
// both SMALL_INT, and not equal if we get here
return local_false;
} else {
goto skip_one_pass;
}
}
// generic type, call binary_op(MP_BINARY_OP_EQUAL)
mp_obj_type_t *type = mp_obj_get_type(o1);
if (type->binary_op != NULL) {
mp_obj_t r = type->binary_op(MP_BINARY_OP_EQUAL, o1, o2);
if (r != MP_OBJ_NULL) {
return r == mp_const_true ? true : false;
while (pass_number < 2) {
const mp_obj_type_t *type = mp_obj_get_type(o1);
// If a full equality test is not needed and the other object is a different
// type then we don't need to bother trying the comparison.
if (type->binary_op != NULL &&
((type->flags & MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE) || mp_obj_get_type(o2) == type)) {
// CPython is asymmetric: it will try __eq__ if there's no __ne__ but not the
// other way around. If the class doesn't need a full test we can skip __ne__.
if (op == MP_BINARY_OP_NOT_EQUAL && (type->flags & MP_TYPE_FLAG_EQ_HAS_NEQ_TEST)) {
mp_obj_t r = type->binary_op(MP_BINARY_OP_NOT_EQUAL, o1, o2);
if (r != MP_OBJ_NULL) {
return r;
}
}
// Try calling __eq__.
mp_obj_t r = type->binary_op(MP_BINARY_OP_EQUAL, o1, o2);
if (r != MP_OBJ_NULL) {
if (op == MP_BINARY_OP_EQUAL) {
return r;
} else {
return mp_obj_is_true(r) ? local_true : local_false;
}
}
}
skip_one_pass:
// Try the other way around if none of the above worked
++pass_number;
mp_obj_t temp = o1;
o1 = o2;
o2 = temp;
}
// equality not implemented, and objects are not the same object, so
// they are defined as not equal
return false;
// equality not implemented, so fall back to pointer comparison
return (o1 == o2) ? local_true : local_false;
}
bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) {
return mp_obj_is_true(mp_obj_equal_not_equal(MP_BINARY_OP_EQUAL, o1, o2));
}
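From a type author's point of view, the new two-pass logic only calls a type's binary_op for (in)equality when both operands share that type, unless the type sets MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE; returning MP_OBJ_NULL still means "not handled", letting the runtime try the reflected operand and finally fall back to pointer comparison. A hedged sketch of such a handler (the example_obj type is illustrative):

    #include "py/obj.h"
    #include "py/runtime.h"

    typedef struct _example_obj_t {
        mp_obj_base_t base;
        mp_int_t id;
    } example_obj_t;

    STATIC mp_obj_t example_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
        if (op != MP_BINARY_OP_EQUAL || !mp_obj_is_type(rhs_in, mp_obj_get_type(lhs_in))) {
            return MP_OBJ_NULL; // not handled here; the runtime falls back
        }
        example_obj_t *lhs = MP_OBJ_TO_PTR(lhs_in);
        example_obj_t *rhs = MP_OBJ_TO_PTR(rhs_in);
        return mp_obj_new_bool(lhs->id == rhs->id);
    }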
mp_int_t mp_obj_get_int(mp_const_obj_t arg) {
@@ -275,7 +340,7 @@ bool mp_obj_get_float_maybe(mp_obj_t arg, mp_float_t *value) {
} else if (arg == mp_const_true) {
val = 1;
} else if (mp_obj_is_small_int(arg)) {
val = MP_OBJ_SMALL_INT_VALUE(arg);
val = (mp_float_t)MP_OBJ_SMALL_INT_VALUE(arg);
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
} else if (mp_obj_is_type(arg, &mp_type_int)) {
val = mp_obj_int_as_float_impl(arg);
@@ -294,19 +359,19 @@ mp_float_t mp_obj_get_float(mp_obj_t arg) {
mp_float_t val;
if (!mp_obj_get_float_maybe(arg, &val)) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("can't convert to float");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"can't convert %s to float", mp_obj_get_type_str(arg)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("can't convert to float"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("can't convert %s to float"), mp_obj_get_type_str(arg));
#endif
}
return val;
}
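The error-raising hunks in this file all follow the same pattern: runtime checks of MICROPY_ERROR_REPORTING become preprocessor conditionals, and message literals are wrapped in MP_ERROR_TEXT so they can be compressed into ROM when MICROPY_ROM_TEXT_COMPRESSION is enabled. A hedged sketch of the resulting call-site style in an ordinary C function (the function itself is illustrative):

    #include "py/runtime.h"

    STATIC mp_obj_t require_positive(mp_obj_t arg) {
        mp_int_t v = mp_obj_get_int(arg);
        if (v <= 0) {
            // The literal goes through MP_ERROR_TEXT so the build can store a
            // compressed copy in ROM when text compression is enabled.
            mp_raise_ValueError(MP_ERROR_TEXT("value must be positive"));
        }
        return MP_OBJ_NEW_SMALL_INT(v);
    }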
#if MICROPY_PY_BUILTINS_COMPLEX
void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
bool mp_obj_get_complex_maybe(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
if (arg == mp_const_false) {
*real = 0;
*imag = 0;
@@ -314,7 +379,7 @@ void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
*real = 1;
*imag = 0;
} else if (mp_obj_is_small_int(arg)) {
*real = MP_OBJ_SMALL_INT_VALUE(arg);
*real = (mp_float_t)MP_OBJ_SMALL_INT_VALUE(arg);
*imag = 0;
#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
} else if (mp_obj_is_type(arg, &mp_type_int)) {
@@ -327,12 +392,19 @@ void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
} else if (mp_obj_is_type(arg, &mp_type_complex)) {
mp_obj_complex_get(arg, real, imag);
} else {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("can't convert to complex");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"can't convert %s to complex", mp_obj_get_type_str(arg)));
}
return false;
}
return true;
}
void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
if (!mp_obj_get_complex_maybe(arg, real, imag)) {
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("can't convert to complex"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("can't convert %s to complex"), mp_obj_get_type_str(arg));
#endif
}
}
#endif
@@ -345,12 +417,12 @@ void mp_obj_get_array(mp_obj_t o, size_t *len, mp_obj_t **items) {
} else if (mp_obj_is_type(o, &mp_type_list)) {
mp_obj_list_get(o, len, items);
} else {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("expected tuple/list");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"object '%s' isn't a tuple or list", mp_obj_get_type_str(o)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("expected tuple/list"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("object '%s' isn't a tuple or list"), mp_obj_get_type_str(o));
#endif
}
}
@@ -359,12 +431,12 @@ void mp_obj_get_array_fixed_n(mp_obj_t o, size_t len, mp_obj_t **items) {
size_t seq_len;
mp_obj_get_array(o, &seq_len, items);
if (seq_len != len) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_ValueError("tuple/list has wrong length");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_ValueError,
"requested length %d but object has length %d", (int)len, (int)seq_len));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_ValueError(MP_ERROR_TEXT("tuple/list has wrong length"));
#else
mp_raise_msg_varg(&mp_type_ValueError,
MP_ERROR_TEXT("requested length %d but object has length %d"), (int)len, (int)seq_len);
#endif
}
}
@@ -374,13 +446,13 @@ size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool
if (mp_obj_is_small_int(index)) {
i = MP_OBJ_SMALL_INT_VALUE(index);
} else if (!mp_obj_get_int_maybe(index, &i)) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("indices must be integers");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"%q indices must be integers, not %s",
type->name, mp_obj_get_type_str(index)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("indices must be integers"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("%q indices must be integers, not %s"),
type->name, mp_obj_get_type_str(index));
#endif
}
if (i < 0) {
@@ -394,12 +466,11 @@ size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool
}
} else {
if (i < 0 || (mp_uint_t)i >= len) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_msg(&mp_type_IndexError, "index out of range");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_IndexError,
"%q index out of range", type->name));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_msg(&mp_type_IndexError, MP_ERROR_TEXT("index out of range"));
#else
mp_raise_msg_varg(&mp_type_IndexError, MP_ERROR_TEXT("%q index out of range"), type->name);
#endif
}
}
@@ -429,12 +500,12 @@ mp_obj_t mp_obj_id(mp_obj_t o_in) {
mp_obj_t mp_obj_len(mp_obj_t o_in) {
mp_obj_t len = mp_obj_len_maybe(o_in);
if (len == MP_OBJ_NULL) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("object has no len");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"object of type '%s' has no len()", mp_obj_get_type_str(o_in)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("object has no len"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("object of type '%s' has no len()"), mp_obj_get_type_str(o_in));
#endif
} else {
return len;
}
@@ -443,15 +514,15 @@ mp_obj_t mp_obj_len(mp_obj_t o_in) {
// may return MP_OBJ_NULL
mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) {
if (
#if !MICROPY_PY_BUILTINS_STR_UNICODE
#if !MICROPY_PY_BUILTINS_STR_UNICODE
// It's simple - unicode is slow, non-unicode is fast
mp_obj_is_str(o_in) ||
#endif
#endif
mp_obj_is_type(o_in, &mp_type_bytes)) {
GET_STR_LEN(o_in, l);
return MP_OBJ_NEW_SMALL_INT(l);
} else {
mp_obj_type_t *type = mp_obj_get_type(o_in);
const mp_obj_type_t *type = mp_obj_get_type(o_in);
if (type->unary_op != NULL) {
return type->unary_op(MP_UNARY_OP_LEN, o_in);
} else {
@@ -461,7 +532,7 @@ mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) {
}
mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value) {
mp_obj_type_t *type = mp_obj_get_type(base);
const mp_obj_type_t *type = mp_obj_get_type(base);
if (type->subscr != NULL) {
mp_obj_t ret = type->subscr(base, index, value);
if (ret != MP_OBJ_NULL) {
@@ -470,26 +541,26 @@ mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value) {
// TODO: call base classes here?
}
if (value == MP_OBJ_NULL) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("object doesn't support item deletion");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"'%s' object doesn't support item deletion", mp_obj_get_type_str(base)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("object doesn't support item deletion"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("'%s' object doesn't support item deletion"), mp_obj_get_type_str(base));
#endif
} else if (value == MP_OBJ_SENTINEL) {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("object isn't subscriptable");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"'%s' object isn't subscriptable", mp_obj_get_type_str(base)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("object isn't subscriptable"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("'%s' object isn't subscriptable"), mp_obj_get_type_str(base));
#endif
} else {
if (MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE) {
mp_raise_TypeError("object doesn't support item assignment");
} else {
nlr_raise(mp_obj_new_exception_msg_varg(&mp_type_TypeError,
"'%s' object doesn't support item assignment", mp_obj_get_type_str(base)));
}
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
mp_raise_TypeError(MP_ERROR_TEXT("object doesn't support item assignment"));
#else
mp_raise_msg_varg(&mp_type_TypeError,
MP_ERROR_TEXT("'%s' object doesn't support item assignment"), mp_obj_get_type_str(base));
#endif
}
}
@@ -506,7 +577,7 @@ mp_obj_t mp_identity_getiter(mp_obj_t self, mp_obj_iter_buf_t *iter_buf) {
}
bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
mp_obj_type_t *type = mp_obj_get_type(obj);
const mp_obj_type_t *type = mp_obj_get_type(obj);
if (type->buffer_p.get_buffer == NULL) {
return false;
}
@@ -519,13 +590,15 @@ bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
if (!mp_get_buffer(obj, bufinfo, flags)) {
mp_raise_TypeError("object with buffer protocol required");
mp_raise_TypeError(MP_ERROR_TEXT("object with buffer protocol required"));
}
}
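A hedged usage sketch of the buffer-protocol helper touched above, as it would typically appear in a C module function (sum_bytes is illustrative):

    #include "py/runtime.h"

    STATIC mp_obj_t sum_bytes(mp_obj_t data_in) {
        mp_buffer_info_t bufinfo;
        // Raises TypeError if data_in does not support the buffer protocol.
        mp_get_buffer_raise(data_in, &bufinfo, MP_BUFFER_READ);
        mp_int_t total = 0;
        const byte *p = bufinfo.buf;
        for (size_t i = 0; i < bufinfo.len; ++i) {
            total += p[i];
        }
        return mp_obj_new_int(total);
    }
    STATIC MP_DEFINE_CONST_FUN_OBJ_1(sum_bytes_obj, sum_bytes);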
mp_obj_t mp_generic_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
switch (op) {
case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT((mp_uint_t)o_in);
default: return MP_OBJ_NULL; // op not supported
case MP_UNARY_OP_HASH:
return MP_OBJ_NEW_SMALL_INT((mp_uint_t)o_in);
default:
return MP_OBJ_NULL; // op not supported
}
}

View File

@@ -26,6 +26,8 @@
#ifndef MICROPY_INCLUDED_PY_OBJ_H
#define MICROPY_INCLUDED_PY_OBJ_H
#include <assert.h>
#include "py/mpconfig.h"
#include "py/misc.h"
#include "py/qstr.h"
@@ -66,13 +68,13 @@ typedef struct _mp_obj_base_t mp_obj_base_t;
// as many as we can to MP_OBJ_NULL because it's cheaper to load/compare 0.
#if MICROPY_DEBUG_MP_OBJ_SENTINELS
#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0))
#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)4))
#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)8))
#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void *)0))
#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void *)4))
#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void *)8))
#else
#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void*)0))
#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void*)0))
#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void*)4))
#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void *)0))
#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void *)0))
#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void *)4))
#endif
// These macros/inline functions operate on objects and depend on the
@@ -81,15 +83,23 @@ typedef struct _mp_obj_base_t mp_obj_base_t;
#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A
static inline bool mp_obj_is_small_int(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 1) != 0); }
static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 1) != 0;
}
#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1))
static inline bool mp_obj_is_qstr(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 3) == 2); }
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 2))
static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 7) == 2;
}
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 2))
static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 7) == 6;
}
#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 3)
#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 3) | 6))
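A small self-contained check of the representation-A tag change shown above: the qstr test narrows from the low two bits to the low three, freeing the pattern 0b110 for the new immediate objects (the concrete values in the demo are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uintptr_t qstr_word = (42u << 3) | 2; // MP_OBJ_NEW_QSTR(42)
        uintptr_t imm_word = (1u << 3) | 6;   // MP_OBJ_NEW_IMMEDIATE_OBJ(1)
        assert((qstr_word & 7) == 2 && (qstr_word >> 3) == 42);
        assert((imm_word & 7) == 6 && (imm_word >> 3) == 1);
        return 0;
    }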
#if MICROPY_PY_BUILTINS_FLOAT
#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
@@ -102,20 +112,29 @@ mp_float_t mp_obj_float_get(mp_obj_t self_in);
mp_obj_t mp_obj_new_float(mp_float_t value);
#endif
static inline bool mp_obj_is_obj(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 3) == 0); }
static inline bool mp_obj_is_obj(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 3) == 0;
}
#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B
static inline bool mp_obj_is_small_int(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 3) == 1); }
static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 3) == 1;
}
#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 2)
#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 2) | 1))
static inline bool mp_obj_is_qstr(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 3) == 3); }
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 2)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 2) | 3))
static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 7) == 3;
}
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 3))
static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 7) == 7;
}
#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 3)
#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 3) | 7))
#if MICROPY_PY_BUILTINS_FLOAT
#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
@@ -128,13 +147,15 @@ mp_float_t mp_obj_float_get(mp_obj_t self_in);
mp_obj_t mp_obj_new_float(mp_float_t value);
#endif
static inline bool mp_obj_is_obj(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 1) == 0); }
static inline bool mp_obj_is_obj(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 1) == 0;
}
#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
static inline bool mp_obj_is_small_int(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 1) != 0); }
static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 1) != 0;
}
#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1))
@@ -142,8 +163,9 @@ static inline bool mp_obj_is_small_int(mp_const_obj_t o)
#define mp_const_float_e MP_ROM_PTR((mp_obj_t)(((0x402df854 & ~3) | 2) + 0x80800000))
#define mp_const_float_pi MP_ROM_PTR((mp_obj_t)(((0x40490fdb & ~3) | 2) + 0x80800000))
static inline bool mp_obj_is_float(mp_const_obj_t o)
{ return (((mp_uint_t)(o)) & 3) == 2 && (((mp_uint_t)(o)) & 0xff800007) != 0x00000006; }
static inline bool mp_obj_is_float(mp_const_obj_t o) {
return (((mp_uint_t)(o)) & 3) == 2 && (((mp_uint_t)(o)) & 0xff800007) != 0x00000006;
}
static inline mp_float_t mp_obj_float_get(mp_const_obj_t o) {
union {
mp_float_t f;
@@ -160,25 +182,41 @@ static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
}
#endif
static inline bool mp_obj_is_qstr(mp_const_obj_t o)
{ return (((mp_uint_t)(o)) & 0xff800007) == 0x00000006; }
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 0x00000006))
static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
return (((mp_uint_t)(o)) & 0xff80000f) == 0x00000006;
}
#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 4)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 4) | 0x00000006))
static inline bool mp_obj_is_obj(mp_const_obj_t o)
{ return ((((mp_int_t)(o)) & 3) == 0); }
static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
return (((mp_uint_t)(o)) & 0xff80000f) == 0x0000000e;
}
#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 4)
#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 4) | 0xe))
static inline bool mp_obj_is_obj(mp_const_obj_t o) {
return (((mp_int_t)(o)) & 3) == 0;
}
#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
static inline bool mp_obj_is_small_int(mp_const_obj_t o)
{ return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0001000000000000); }
static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
return (((uint64_t)(o)) & 0xffff000000000000) == 0x0001000000000000;
}
#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)((o) << 16)) >> 17)
#define MP_OBJ_NEW_SMALL_INT(small_int) (((((uint64_t)(small_int)) & 0x7fffffffffff) << 1) | 0x0001000000000001)
static inline bool mp_obj_is_qstr(mp_const_obj_t o)
{ return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0002000000000000); }
static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
return (((uint64_t)(o)) & 0xffff000000000000) == 0x0002000000000000;
}
#define MP_OBJ_QSTR_VALUE(o) ((((uint32_t)(o)) >> 1) & 0xffffffff)
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 1) | 0x0002000000000001))
#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)(((uint64_t)(((uint32_t)(qst)) << 1)) | 0x0002000000000001))
static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
return (((uint64_t)(o)) & 0xffff000000000000) == 0x0003000000000000;
}
#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) ((((uint32_t)(o)) >> 46) & 3)
#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) (((uint64_t)(val) << 46) | 0x0003000000000000)
#if MICROPY_PY_BUILTINS_FLOAT
@@ -208,13 +246,17 @@ static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
}
#endif
static inline bool mp_obj_is_obj(mp_const_obj_t o)
{ return ((((uint64_t)(o)) & 0xffff000000000000) == 0x0000000000000000); }
#define MP_OBJ_TO_PTR(o) ((void*)(uintptr_t)(o))
static inline bool mp_obj_is_obj(mp_const_obj_t o) {
return (((uint64_t)(o)) & 0xffff000000000000) == 0x0000000000000000;
}
#define MP_OBJ_TO_PTR(o) ((void *)(uintptr_t)(o))
#define MP_OBJ_FROM_PTR(p) ((mp_obj_t)((uintptr_t)(p)))
// rom object storage needs special handling to widen 32-bit pointer to 64-bits
typedef union _mp_rom_obj_t { uint64_t u64; struct { const void *lo, *hi; } u32; } mp_rom_obj_t;
typedef union _mp_rom_obj_t { uint64_t u64;
struct { const void *lo, *hi;
} u32;
} mp_rom_obj_t;
#define MP_ROM_INT(i) {MP_OBJ_NEW_SMALL_INT(i)}
#define MP_ROM_QSTR(q) {MP_OBJ_NEW_QSTR(q)}
#if MP_ENDIANNESS_LITTLE
@@ -232,7 +274,7 @@ typedef union _mp_rom_obj_t { uint64_t u64; struct { const void *lo, *hi; } u32;
// Cast mp_obj_t to object pointer
#ifndef MP_OBJ_TO_PTR
#define MP_OBJ_TO_PTR(o) ((void*)o)
#define MP_OBJ_TO_PTR(o) ((void *)o)
#endif
// Cast object pointer to mp_obj_t
@@ -242,6 +284,24 @@ typedef union _mp_rom_obj_t { uint64_t u64; struct { const void *lo, *hi; } u32;
// Macros to create objects that are stored in ROM.
#ifndef MP_ROM_NONE
#if MICROPY_OBJ_IMMEDIATE_OBJS
#define MP_ROM_NONE mp_const_none
#else
#define MP_ROM_NONE MP_ROM_PTR(&mp_const_none_obj)
#endif
#endif
#ifndef MP_ROM_FALSE
#if MICROPY_OBJ_IMMEDIATE_OBJS
#define MP_ROM_FALSE mp_const_false
#define MP_ROM_TRUE mp_const_true
#else
#define MP_ROM_FALSE MP_ROM_PTR(&mp_const_false_obj)
#define MP_ROM_TRUE MP_ROM_PTR(&mp_const_true_obj)
#endif
#endif
#ifndef MP_ROM_INT
typedef mp_const_obj_t mp_rom_obj_t;
#define MP_ROM_INT(i) MP_OBJ_NEW_SMALL_INT(i)
@@ -271,25 +331,25 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
#define MP_DEFINE_CONST_FUN_OBJ_0(obj_name, fun_name) \
const mp_obj_fun_builtin_fixed_t obj_name = \
{{&mp_type_fun_builtin_0}, .fun._0 = fun_name}
{{&mp_type_fun_builtin_0}, .fun._0 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_1(obj_name, fun_name) \
const mp_obj_fun_builtin_fixed_t obj_name = \
{{&mp_type_fun_builtin_1}, .fun._1 = fun_name}
{{&mp_type_fun_builtin_1}, .fun._1 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_2(obj_name, fun_name) \
const mp_obj_fun_builtin_fixed_t obj_name = \
{{&mp_type_fun_builtin_2}, .fun._2 = fun_name}
{{&mp_type_fun_builtin_2}, .fun._2 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_3(obj_name, fun_name) \
const mp_obj_fun_builtin_fixed_t obj_name = \
{{&mp_type_fun_builtin_3}, .fun._3 = fun_name}
{{&mp_type_fun_builtin_3}, .fun._3 = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_VAR(obj_name, n_args_min, fun_name) \
const mp_obj_fun_builtin_var_t obj_name = \
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, false), .fun.var = fun_name}
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, false), .fun.var = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name, n_args_min, n_args_max, fun_name) \
const mp_obj_fun_builtin_var_t obj_name = \
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, false), .fun.var = fun_name}
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, false), .fun.var = fun_name}
#define MP_DEFINE_CONST_FUN_OBJ_KW(obj_name, n_args_min, fun_name) \
const mp_obj_fun_builtin_var_t obj_name = \
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, true), .fun.kw = fun_name}
{{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, true), .fun.kw = fun_name}
// These macros are used to define constant map/dict objects
// You can put "static" in front of the definition to make it local
@@ -301,7 +361,7 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
.is_ordered = 1, \
.used = MP_ARRAY_SIZE(table_name), \
.alloc = MP_ARRAY_SIZE(table_name), \
.table = (mp_map_elem_t*)(mp_rom_map_elem_t*)table_name, \
.table = (mp_map_elem_t *)(mp_rom_map_elem_t *)table_name, \
}
#define MP_DEFINE_CONST_DICT(dict_name, table_name) \
@@ -313,7 +373,7 @@ typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
.is_ordered = 1, \
.used = MP_ARRAY_SIZE(table_name), \
.alloc = MP_ARRAY_SIZE(table_name), \
.table = (mp_map_elem_t*)(mp_rom_map_elem_t*)table_name, \
.table = (mp_map_elem_t *)(mp_rom_map_elem_t *)table_name, \
}, \
}
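// Illustrative usage sketch (hypothetical names, not part of the diff): a constant dict
// built with the macros above; the entries are mp_rom_map_elem_t and the wrapper dict
// object is emitted by MP_DEFINE_CONST_DICT.
STATIC const mp_rom_map_elem_t example_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_INT(0) },
    { MP_ROM_QSTR(MP_QSTR_default), MP_ROM_FALSE },
};
STATIC MP_DEFINE_CONST_DICT(example_locals_dict, example_locals_dict_table);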
@@ -345,15 +405,10 @@ typedef struct _mp_rom_map_elem_t {
mp_rom_obj_t value;
} mp_rom_map_elem_t;
// TODO maybe have a truncated mp_map_t for fixed tables, since alloc=used
// put alloc last in the structure, so the truncated version does not need it
// this would save 1 ROM word for all ROM objects that have a locals_dict
// would also need a truncated dict structure
typedef struct _mp_map_t {
size_t all_keys_are_qstrs : 1;
size_t is_fixed : 1; // a fixed array that can't be modified; must also be ordered
size_t is_ordered : 1; // an ordered array
size_t is_fixed : 1; // if set, table is fixed/read-only and can't be modified
size_t is_ordered : 1; // if set, table is an ordered array, not a hash map
size_t used : (8 * sizeof(size_t) - 3);
size_t alloc;
mp_map_elem_t *table;
@@ -367,9 +422,10 @@ typedef enum _mp_map_lookup_kind_t {
MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND = 3, // only valid for mp_set_lookup
} mp_map_lookup_kind_t;
extern const mp_map_t mp_const_empty_map;
static inline bool mp_map_slot_is_filled(const mp_map_t *map, size_t pos) { return ((map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL); }
static inline bool mp_map_slot_is_filled(const mp_map_t *map, size_t pos) {
assert(pos < map->alloc);
return (map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL;
}
void mp_map_init(mp_map_t *map, size_t n);
void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table);
@@ -388,7 +444,9 @@ typedef struct _mp_set_t {
mp_obj_t *table;
} mp_set_t;
static inline bool mp_set_slot_is_filled(const mp_set_t *set, size_t pos) { return ((set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL); }
static inline bool mp_set_slot_is_filled(const mp_set_t *set, size_t pos) {
return (set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL;
}
void mp_set_init(mp_set_t *set, size_t n);
mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind);
@@ -406,6 +464,23 @@ typedef mp_obj_t (*mp_fun_var_t)(size_t n, const mp_obj_t *);
// this arg to mp_map_lookup().
typedef mp_obj_t (*mp_fun_kw_t)(size_t n, const mp_obj_t *, mp_map_t *);
// Flags for type behaviour (mp_obj_type_t.flags)
// If MP_TYPE_FLAG_EQ_NOT_REFLEXIVE is clear then __eq__ is reflexive (A==A returns True).
// If MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE is clear then the type can't be equal to an
// instance of any different class that also clears this flag. If this flag is set
// then the type may check for equality against a different type.
// If MP_TYPE_FLAG_EQ_HAS_NEQ_TEST is clear then the type only implements the __eq__
// operator and not the __ne__ operator. If it's set then __ne__ may be implemented.
// If MP_TYPE_FLAG_BINDS_SELF is set then the type as a method binds self as the first arg.
// If MP_TYPE_FLAG_BUILTIN_FUN is set then the type is a built-in function type.
#define MP_TYPE_FLAG_IS_SUBCLASSED (0x0001)
#define MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS (0x0002)
#define MP_TYPE_FLAG_EQ_NOT_REFLEXIVE (0x0004)
#define MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE (0x0008)
#define MP_TYPE_FLAG_EQ_HAS_NEQ_TEST (0x0010)
#define MP_TYPE_FLAG_BINDS_SELF (0x0020)
#define MP_TYPE_FLAG_BUILTIN_FUN (0x0040)
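// Illustrative sketch (hypothetical type, not part of the diff): the flags above are set
// on the type object itself, e.g. for a buffer-like type whose __eq__ may compare against
// other types (as done for bytearray and memoryview later in this commit):
STATIC const mp_obj_type_t mp_type_example = {
    { &mp_type_type },
    .flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE,
    .name = MP_QSTR_example,
};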
typedef enum {
PRINT_STR = 0,
PRINT_REPR = 1,
@@ -435,18 +510,9 @@ typedef mp_obj_t (*mp_getiter_fun_t)(mp_obj_t self_in, mp_obj_iter_buf_t *iter_b
// Buffer protocol
typedef struct _mp_buffer_info_t {
// if we'd bother to support various versions of structure
// (with different number of fields), we can distinguish
// them with ver = sizeof(struct). Cons: overkill for *micro*?
//int ver; // ?
void *buf; // can be NULL if len == 0
size_t len; // in bytes
int typecode; // as per binary.h
// Rationale: to load arbitrary-sized sprites directly to LCD
// Cons: a bit of an ad-hoc use case
// int stride;
} mp_buffer_info_t;
#define MP_BUFFER_READ (1)
#define MP_BUFFER_WRITE (2)
@@ -589,7 +655,6 @@ extern const mp_obj_type_t mp_type_MemoryError;
extern const mp_obj_type_t mp_type_NameError;
extern const mp_obj_type_t mp_type_NotImplementedError;
extern const mp_obj_type_t mp_type_OSError;
extern const mp_obj_type_t mp_type_TimeoutError;
extern const mp_obj_type_t mp_type_OverflowError;
extern const mp_obj_type_t mp_type_RuntimeError;
extern const mp_obj_type_t mp_type_StopAsyncIteration;
@@ -602,47 +667,71 @@ extern const mp_obj_type_t mp_type_ValueError;
extern const mp_obj_type_t mp_type_ViperTypeError;
extern const mp_obj_type_t mp_type_ZeroDivisionError;
// Constant objects, globally accessible
// The macros are for convenience only
// Constant objects, globally accessible: None, False, True
// These should always be accessed via the below macros.
#if MICROPY_OBJ_IMMEDIATE_OBJS
// None is even while False/True are odd so their types can be distinguished with 1 bit.
#define mp_const_none MP_OBJ_NEW_IMMEDIATE_OBJ(0)
#define mp_const_false MP_OBJ_NEW_IMMEDIATE_OBJ(1)
#define mp_const_true MP_OBJ_NEW_IMMEDIATE_OBJ(3)
#else
#define mp_const_none (MP_OBJ_FROM_PTR(&mp_const_none_obj))
#define mp_const_false (MP_OBJ_FROM_PTR(&mp_const_false_obj))
#define mp_const_true (MP_OBJ_FROM_PTR(&mp_const_true_obj))
#define mp_const_empty_bytes (MP_OBJ_FROM_PTR(&mp_const_empty_bytes_obj))
#define mp_const_empty_tuple (MP_OBJ_FROM_PTR(&mp_const_empty_tuple_obj))
#define mp_const_notimplemented (MP_OBJ_FROM_PTR(&mp_const_notimplemented_obj))
extern const struct _mp_obj_none_t mp_const_none_obj;
extern const struct _mp_obj_bool_t mp_const_false_obj;
extern const struct _mp_obj_bool_t mp_const_true_obj;
#endif
// Constant objects, globally accessible: b'', (), {}, Ellipsis, NotImplemented, GeneratorExit()
// The below macros are for convenience only.
#define mp_const_empty_bytes (MP_OBJ_FROM_PTR(&mp_const_empty_bytes_obj))
#define mp_const_empty_tuple (MP_OBJ_FROM_PTR(&mp_const_empty_tuple_obj))
#define mp_const_notimplemented (MP_OBJ_FROM_PTR(&mp_const_notimplemented_obj))
extern const struct _mp_obj_str_t mp_const_empty_bytes_obj;
extern const struct _mp_obj_tuple_t mp_const_empty_tuple_obj;
extern const struct _mp_obj_dict_t mp_const_empty_dict_obj;
extern const struct _mp_obj_singleton_t mp_const_ellipsis_obj;
extern const struct _mp_obj_singleton_t mp_const_notimplemented_obj;
extern const struct _mp_obj_exception_t mp_const_GeneratorExit_obj;
// Fixed empty map. Useful when calling keyword-receiving functions
// without any keywords from C, etc.
#define mp_const_empty_map (mp_const_empty_dict_obj.map)
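// Illustrative sketch (not part of the diff; the wrapper name is hypothetical):
// mp_const_empty_map lets C code invoke a keyword-capable handler (mp_fun_kw_t, declared
// above) with "no keyword arguments" and without allocating a map.
static mp_obj_t call_without_kwargs(mp_fun_kw_t fun, size_t n_args, const mp_obj_t *args) {
    return fun(n_args, args, (mp_map_t *)&mp_const_empty_map);
}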
// General API for objects
// These macros are derived from more primitive ones and are used to
// check for more specific object types.
// Note: these are kept as macros because inline functions sometimes use much
// more code space than the equivalent macros, depending on the compiler.
#define mp_obj_is_type(o, t) (mp_obj_is_obj(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type == (t))) // this does not work for checking int, str or fun; use below macros for that
#define mp_obj_is_type(o, t) (mp_obj_is_obj(o) && (((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type == (t))) // this does not work for checking int, str or fun; use below macros for that
#if MICROPY_OBJ_IMMEDIATE_OBJS
// bool's are immediates, not real objects, so test for the 2 possible values.
#define mp_obj_is_bool(o) ((o) == mp_const_false || (o) == mp_const_true)
#else
#define mp_obj_is_bool(o) mp_obj_is_type(o, &mp_type_bool)
#endif
#define mp_obj_is_int(o) (mp_obj_is_small_int(o) || mp_obj_is_type(o, &mp_type_int))
#define mp_obj_is_str(o) (mp_obj_is_qstr(o) || mp_obj_is_type(o, &mp_type_str))
#define mp_obj_is_str_or_bytes(o) (mp_obj_is_qstr(o) || (mp_obj_is_obj(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->binary_op == mp_obj_str_binary_op))
#define mp_obj_is_fun(o) (mp_obj_is_obj(o) && (((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function))
#define mp_obj_is_str_or_bytes(o) (mp_obj_is_qstr(o) || (mp_obj_is_obj(o) && ((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->binary_op == mp_obj_str_binary_op))
#define mp_obj_is_dict_or_ordereddict(o) (mp_obj_is_obj(o) && ((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->make_new == mp_obj_dict_make_new)
#define mp_obj_is_fun(o) (mp_obj_is_obj(o) && (((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function))
mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict);
static inline mp_obj_t mp_obj_new_bool(mp_int_t x) { return x ? mp_const_true : mp_const_false; }
static inline mp_obj_t mp_obj_new_bool(mp_int_t x) {
return x ? mp_const_true : mp_const_false;
}
mp_obj_t mp_obj_new_cell(mp_obj_t obj);
mp_obj_t mp_obj_new_int(mp_int_t value);
mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value);
mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, unsigned int base);
mp_obj_t mp_obj_new_int_from_ll(long long val); // this must return a multi-precision integer object (or raise an overflow exception)
mp_obj_t mp_obj_new_int_from_ull(unsigned long long val); // this must return a multi-precision integer object (or raise an overflow exception)
mp_obj_t mp_obj_new_str(const char* data, size_t len);
mp_obj_t mp_obj_new_str_via_qstr(const char* data, size_t len);
mp_obj_t mp_obj_new_str(const char *data, size_t len);
mp_obj_t mp_obj_new_str_via_qstr(const char *data, size_t len);
mp_obj_t mp_obj_new_str_from_vstr(const mp_obj_type_t *type, vstr_t *vstr);
mp_obj_t mp_obj_new_bytes(const byte* data, size_t len);
mp_obj_t mp_obj_new_bytes(const byte *data, size_t len);
mp_obj_t mp_obj_new_bytearray(size_t n, void *items);
mp_obj_t mp_obj_new_bytearray_by_ref(size_t n, void *items);
#if MICROPY_PY_BUILTINS_FLOAT
@@ -650,10 +739,17 @@ mp_obj_t mp_obj_new_int_from_float(mp_float_t val);
mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag);
#endif
mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type);
mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg);
mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, size_t n_args, const mp_obj_t *args);
mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg);
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
#define mp_obj_new_exception_msg(exc_type, msg) mp_obj_new_exception(exc_type)
#define mp_obj_new_exception_msg_varg(exc_type, ...) mp_obj_new_exception(exc_type)
#else
mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg);
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
#endif
#ifdef va_start
mp_obj_t mp_obj_new_exception_msg_vlist(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, va_list arg); // same fmt restrictions as above
#endif
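// Illustrative note (not part of the diff): call sites wrap literal messages in
// MP_ERROR_TEXT() so the text can be dropped entirely (MICROPY_ERROR_REPORTING_NONE) or
// stored compressed (MICROPY_ROM_TEXT_COMPRESSION), e.g. as used later in this commit:
//     mp_raise_ValueError(MP_ERROR_TEXT("lhs and rhs should be compatible"));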
mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table);
mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table);
mp_obj_t mp_obj_new_fun_asm(size_t n_args, const void *fun_data, mp_uint_t type_sig);
@@ -669,10 +765,10 @@ mp_obj_t mp_obj_new_getitem_iter(mp_obj_t *args, mp_obj_iter_buf_t *iter_buf);
mp_obj_t mp_obj_new_module(qstr module_name);
mp_obj_t mp_obj_new_memoryview(byte typecode, size_t nitems, void *items);
mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in);
const mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in);
const char *mp_obj_get_type_str(mp_const_obj_t o_in);
bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo); // arguments should be type objects
mp_obj_t mp_instance_cast_to_native_base(mp_const_obj_t self_in, mp_const_obj_t native_type);
mp_obj_t mp_obj_cast_to_native_base(mp_obj_t self_in, mp_const_obj_t native_type);
void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
void mp_obj_print(mp_obj_t o, mp_print_kind_t kind);
@@ -680,9 +776,14 @@ void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc);
bool mp_obj_is_true(mp_obj_t arg);
bool mp_obj_is_callable(mp_obj_t o_in);
mp_obj_t mp_obj_equal_not_equal(mp_binary_op_t op, mp_obj_t o1, mp_obj_t o2);
bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2);
static inline bool mp_obj_is_integer(mp_const_obj_t o) { return mp_obj_is_int(o) || mp_obj_is_type(o, &mp_type_bool); } // returns true if o is bool, small int or long int
// returns true if o is bool, small int or long int
static inline bool mp_obj_is_integer(mp_const_obj_t o) {
return mp_obj_is_int(o) || mp_obj_is_bool(o);
}
mp_int_t mp_obj_get_int(mp_const_obj_t arg);
mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg);
bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value);
@@ -690,6 +791,7 @@ bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value);
mp_float_t mp_obj_get_float(mp_obj_t self_in);
bool mp_obj_get_float_maybe(mp_obj_t arg, mp_float_t *value);
void mp_obj_get_complex(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
bool mp_obj_get_complex_maybe(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
#endif
void mp_obj_get_array(mp_obj_t o, size_t *len, mp_obj_t **items); // *items may point inside a GC block
void mp_obj_get_array_fixed_n(mp_obj_t o, size_t len, mp_obj_t **items); // *items may point inside a GC block
@@ -709,6 +811,8 @@ void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj);
mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in);
// Will raise exception if value doesn't fit into mp_int_t
mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in);
// Will raise exception if value is negative or doesn't fit into mp_uint_t
mp_uint_t mp_obj_int_get_uint_checked(mp_const_obj_t self_in);
// exception
#define mp_obj_is_native_exception_instance(o) (mp_obj_get_type(o)->make_new == mp_obj_exception_make_new)
@@ -722,6 +826,10 @@ mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in);
mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in);
void mp_init_emergency_exception_buf(void);
static inline mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg) {
assert(exc_type->make_new == mp_obj_exception_make_new);
return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
}
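// Illustrative sketch (not part of the diff; the wrapper name is hypothetical): raising
// an exception that carries a single value, e.g. KeyError(key), from C code.
static void raise_key_error(mp_obj_t key) {
    nlr_raise(mp_obj_new_exception_arg1(&mp_type_KeyError, key));
}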
// str
bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2);
@@ -734,10 +842,45 @@ void mp_str_print_quoted(const mp_print_t *print, const byte *str_data, size_t s
#if MICROPY_PY_BUILTINS_FLOAT
// float
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
static inline float mp_obj_get_float_to_f(mp_obj_t o) {
return mp_obj_get_float(o);
}
static inline double mp_obj_get_float_to_d(mp_obj_t o) {
return (double)mp_obj_get_float(o);
}
static inline mp_obj_t mp_obj_new_float_from_f(float o) {
return mp_obj_new_float(o);
}
static inline mp_obj_t mp_obj_new_float_from_d(double o) {
return mp_obj_new_float((mp_float_t)o);
}
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
static inline float mp_obj_get_float_to_f(mp_obj_t o) {
return (float)mp_obj_get_float(o);
}
static inline double mp_obj_get_float_to_d(mp_obj_t o) {
return mp_obj_get_float(o);
}
static inline mp_obj_t mp_obj_new_float_from_f(float o) {
return mp_obj_new_float((mp_float_t)o);
}
static inline mp_obj_t mp_obj_new_float_from_d(double o) {
return mp_obj_new_float(o);
}
#endif
#if MICROPY_FLOAT_HIGH_QUALITY_HASH
mp_int_t mp_float_hash(mp_float_t val);
#else
static inline mp_int_t mp_float_hash(mp_float_t val) { return (mp_int_t)val; }
static inline mp_int_t mp_float_hash(mp_float_t val) {
return (mp_int_t)val;
}
#endif
mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t rhs); // can return MP_OBJ_NULL if op not supported
@@ -766,20 +909,35 @@ typedef struct _mp_obj_dict_t {
mp_obj_base_t base;
mp_map_t map;
} mp_obj_dict_t;
mp_obj_t mp_obj_dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args);
void mp_obj_dict_init(mp_obj_dict_t *dict, size_t n_args);
size_t mp_obj_dict_len(mp_obj_t self_in);
mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index);
mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value);
mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key);
mp_obj_t mp_obj_dict_copy(mp_obj_t self_in);
static inline mp_map_t *mp_obj_dict_get_map(mp_obj_t dict) {
return &((mp_obj_dict_t*)MP_OBJ_TO_PTR(dict))->map;
return &((mp_obj_dict_t *)MP_OBJ_TO_PTR(dict))->map;
}
// set
void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item);
// slice indexes resolved to particular sequence
typedef struct {
mp_int_t start;
mp_int_t stop;
mp_int_t step;
} mp_bound_slice_t;
// slice
void mp_obj_slice_get(mp_obj_t self_in, mp_obj_t *start, mp_obj_t *stop, mp_obj_t *step);
typedef struct _mp_obj_slice_t {
mp_obj_base_t base;
mp_obj_t start;
mp_obj_t stop;
mp_obj_t step;
} mp_obj_slice_t;
void mp_obj_slice_indices(mp_obj_t self_in, mp_int_t length, mp_bound_slice_t *result);
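// Illustrative sketch (not part of the diff; the helper name is hypothetical): resolving
// a slice object against a sequence of known length into concrete bounds.
static void visit_slice(mp_obj_t slice_in, mp_int_t seq_len) {
    mp_bound_slice_t bound;
    mp_obj_slice_indices(slice_in, seq_len, &bound);
    for (mp_int_t i = bound.start; bound.step > 0 ? i < bound.stop : i > bound.stop; i += bound.step) {
        // index i is a valid, in-range position of the sequence here
    }
}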
// functions
@@ -815,7 +973,7 @@ typedef struct _mp_obj_module_t {
mp_obj_dict_t *globals;
} mp_obj_module_t;
static inline mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t module) {
return ((mp_obj_module_t*)MP_OBJ_TO_PTR(module))->globals;
return ((mp_obj_module_t *)MP_OBJ_TO_PTR(module))->globals;
}
// check if given module object is a package
bool mp_obj_is_package(mp_obj_t module);
@@ -836,13 +994,6 @@ const mp_obj_t *mp_obj_property_get(mp_obj_t self_in);
// sequence helpers
// slice indexes resolved to particular sequence
typedef struct {
mp_uint_t start;
mp_uint_t stop;
mp_int_t step;
} mp_bound_slice_t;
void mp_seq_multiply(const void *items, size_t item_sz, size_t len, size_t times, void *dest);
#if MICROPY_PY_BUILTINS_SLICE
bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice_t *indexes);
@@ -854,19 +1005,19 @@ bool mp_seq_cmp_objs(mp_uint_t op, const mp_obj_t *items1, size_t len1, const mp
mp_obj_t mp_seq_index_obj(const mp_obj_t *items, size_t len, size_t n_args, const mp_obj_t *args);
mp_obj_t mp_seq_count_obj(const mp_obj_t *items, size_t len, mp_obj_t value);
mp_obj_t mp_seq_extract_slice(size_t len, const mp_obj_t *seq, mp_bound_slice_t *indexes);
// Helper to clear stale pointers from allocated, but unused memory, to preclude GC problems
#define mp_seq_clear(start, len, alloc_len, item_sz) memset((byte*)(start) + (len) * (item_sz), 0, ((alloc_len) - (len)) * (item_sz))
#define mp_seq_clear(start, len, alloc_len, item_sz) memset((byte *)(start) + (len) * (item_sz), 0, ((alloc_len) - (len)) * (item_sz))
// Note: dest and slice regions may overlap
#define mp_seq_replace_slice_no_grow(dest, dest_len, beg, end, slice, slice_len, item_sz) \
/*printf("memcpy(%p, %p, %d)\n", dest + beg, slice, slice_len * (item_sz));*/ \
memcpy(((char*)dest) + (beg) * (item_sz), slice, slice_len * (item_sz)); \
/*printf("memmove(%p, %p, %d)\n", dest + (beg + slice_len), dest + end, (dest_len - end) * (item_sz));*/ \
memmove(((char*)dest) + (beg + slice_len) * (item_sz), ((char*)dest) + (end) * (item_sz), (dest_len - end) * (item_sz));
memmove(((char *)dest) + (beg) * (item_sz), slice, slice_len * (item_sz)); \
memmove(((char *)dest) + (beg + slice_len) * (item_sz), ((char *)dest) + (end) * (item_sz), (dest_len - end) * (item_sz));
// Note: dest and slice regions may overlap
#define mp_seq_replace_slice_grow_inplace(dest, dest_len, beg, end, slice, slice_len, len_adj, item_sz) \
/*printf("memmove(%p, %p, %d)\n", dest + beg + len_adj, dest + beg, (dest_len - beg) * (item_sz));*/ \
memmove(((char*)dest) + (beg + slice_len) * (item_sz), ((char*)dest) + (end) * (item_sz), ((dest_len) + (len_adj) - ((beg) + (slice_len))) * (item_sz)); \
memmove(((char*)dest) + (beg) * (item_sz), slice, slice_len * (item_sz));
memmove(((char *)dest) + (beg + slice_len) * (item_sz), ((char *)dest) + (end) * (item_sz), ((dest_len) + (len_adj) - ((beg) + (slice_len))) * (item_sz)); \
memmove(((char *)dest) + (beg) * (item_sz), slice, slice_len * (item_sz));
// Provide translation for legacy API
#define MP_OBJ_IS_SMALL_INT mp_obj_is_small_int

View File

@@ -117,10 +117,10 @@ STATIC mp_obj_t array_construct(char typecode, mp_obj_t initializer) {
// other arrays can only be raw-initialised from bytes and bytearray objects
mp_buffer_info_t bufinfo;
if (((MICROPY_PY_BUILTINS_BYTEARRAY
&& typecode == BYTEARRAY_TYPECODE)
|| (MICROPY_PY_ARRAY
&& (mp_obj_is_type(initializer, &mp_type_bytes)
|| (MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(initializer, &mp_type_bytearray)))))
&& typecode == BYTEARRAY_TYPECODE)
|| (MICROPY_PY_ARRAY
&& (mp_obj_is_type(initializer, &mp_type_bytes)
|| (MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(initializer, &mp_type_bytearray)))))
&& mp_get_buffer(initializer, &bufinfo, MP_BUFFER_READ)) {
// construct array from raw bytes
// we round-down the len to make it a multiple of sz (CPython raises error)
@@ -201,11 +201,7 @@ STATIC mp_obj_t bytearray_make_new(const mp_obj_type_t *type_in, size_t n_args,
mp_obj_t mp_obj_new_memoryview(byte typecode, size_t nitems, void *items) {
mp_obj_array_t *self = m_new_obj(mp_obj_array_t);
self->base.type = &mp_type_memoryview;
self->typecode = typecode;
self->memview_offset = 0;
self->len = nitems;
self->items = items;
mp_obj_memoryview_init(self, typecode, 0, nitems, items);
return MP_OBJ_FROM_PTR(self);
}
@@ -224,6 +220,14 @@ STATIC mp_obj_t memoryview_make_new(const mp_obj_type_t *type_in, size_t n_args,
bufinfo.len / mp_binary_get_size('@', bufinfo.typecode, NULL),
bufinfo.buf));
// If the input object is a memoryview then need to point the items of the
// new memoryview to the start of the buffer so the GC can trace it.
if (mp_obj_get_type(args[0]) == &mp_type_memoryview) {
mp_obj_array_t *other = MP_OBJ_TO_PTR(args[0]);
self->memview_offset = other->memview_offset;
self->items = other->items;
}
// test if the object can be written to
if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_RW)) {
self->typecode |= MP_OBJ_ARRAY_TYPECODE_FLAG_RW; // indicate writable buffer
@@ -249,12 +253,26 @@ STATIC void memoryview_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
STATIC mp_obj_t array_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
switch (op) {
case MP_UNARY_OP_BOOL: return mp_obj_new_bool(o->len != 0);
case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(o->len);
default: return MP_OBJ_NULL; // op not supported
case MP_UNARY_OP_BOOL:
return mp_obj_new_bool(o->len != 0);
case MP_UNARY_OP_LEN:
return MP_OBJ_NEW_SMALL_INT(o->len);
default:
return MP_OBJ_NULL; // op not supported
}
}
STATIC int typecode_for_comparison(int typecode, bool *is_unsigned) {
if (typecode == BYTEARRAY_TYPECODE) {
typecode = 'B';
}
if (typecode <= 'Z') {
typecode += 32; // to lowercase
*is_unsigned = true;
}
return typecode;
}
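// Illustrative note (not part of the diff): typecode_for_comparison folds unsigned
// typecodes onto their signed counterparts so buffers can be compared bytewise: both
// BYTEARRAY_TYPECODE and 'B' become 'b' with *is_unsigned set, while 'f'/'d' pass
// through unchanged and are rejected by the comparison path below.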
STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
mp_obj_array_t *lhs = MP_OBJ_TO_PTR(lhs_in);
switch (op) {
@@ -272,7 +290,7 @@ STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs
// note: lhs->len is element count of lhs, lhs_bufinfo.len is byte count
mp_obj_array_t *res = array_new(lhs_bufinfo.typecode, lhs->len + rhs_len);
mp_seq_cat((byte*)res->items, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_len * sz, byte);
mp_seq_cat((byte *)res->items, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_len * sz, byte);
return MP_OBJ_FROM_PTR(res);
}
@@ -309,14 +327,33 @@ STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs
return mp_const_false;
}
case MP_BINARY_OP_EQUAL: {
case MP_BINARY_OP_EQUAL:
case MP_BINARY_OP_LESS:
case MP_BINARY_OP_LESS_EQUAL:
case MP_BINARY_OP_MORE:
case MP_BINARY_OP_MORE_EQUAL: {
mp_buffer_info_t lhs_bufinfo;
mp_buffer_info_t rhs_bufinfo;
array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
if (!mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) {
return mp_const_false;
}
return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len));
// mp_seq_cmp_bytes is used so only compatible representations can be correctly compared.
// The type doesn't matter: array/bytearray/str/bytes all have the same buffer layout, so
// just check if the typecodes are compatible; for testing equality the types should have the
// same code except for signedness, and not be floating point because nan never equals nan.
// For > and < the types should be the same and unsigned.
// Note that typecode_for_comparison always returns lowercase letters to save code size.
// No need for (& TYPECODE_MASK) here: xxx_get_buffer already takes care of that.
bool is_unsigned = false;
const int lhs_code = typecode_for_comparison(lhs_bufinfo.typecode, &is_unsigned);
const int rhs_code = typecode_for_comparison(rhs_bufinfo.typecode, &is_unsigned);
if (lhs_code == rhs_code && lhs_code != 'f' && lhs_code != 'd' && (op == MP_BINARY_OP_EQUAL || is_unsigned)) {
return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len));
}
// mp_obj_equal_not_equal treats returning MP_OBJ_NULL as 'fall back to pointer comparison'
// for MP_BINARY_OP_EQUAL but that is incompatible with CPython.
mp_raise_NotImplementedError(NULL);
}
default:
@@ -371,7 +408,7 @@ STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in) {
}
// extend
mp_seq_copy((byte*)self->items + self->len * sz, arg_bufinfo.buf, len * sz, byte);
mp_seq_copy((byte *)self->items + self->len * sz, arg_bufinfo.buf, len * sz, byte);
self->len += len;
return mp_const_none;
@@ -388,11 +425,11 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value
return MP_OBJ_NULL; // op not supported
} else {
mp_obj_array_t *o = MP_OBJ_TO_PTR(self_in);
#if MICROPY_PY_BUILTINS_SLICE
#if MICROPY_PY_BUILTINS_SLICE
if (mp_obj_is_type(index_in, &mp_type_slice)) {
mp_bound_slice_t slice;
if (!mp_seq_get_fast_slice_indexes(o->len, index_in, &slice)) {
mp_raise_NotImplementedError("only slices with step=1 (aka None) are supported");
mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
}
if (value != MP_OBJ_SENTINEL) {
#if MICROPY_PY_ARRAY_SLICE_ASSIGN
@@ -400,18 +437,18 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value
size_t src_len;
void *src_items;
size_t item_sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
if (mp_obj_is_obj(value) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(value))->type->subscr == array_subscr) {
if (mp_obj_is_obj(value) && ((mp_obj_base_t *)MP_OBJ_TO_PTR(value))->type->subscr == array_subscr) {
// value is array, bytearray or memoryview
mp_obj_array_t *src_slice = MP_OBJ_TO_PTR(value);
if (item_sz != mp_binary_get_size('@', src_slice->typecode & TYPECODE_MASK, NULL)) {
compat_error:
mp_raise_ValueError("lhs and rhs should be compatible");
mp_raise_ValueError(MP_ERROR_TEXT("lhs and rhs should be compatible"));
}
src_len = src_slice->len;
src_items = src_slice->items;
#if MICROPY_PY_BUILTINS_MEMORYVIEW
if (mp_obj_is_type(value, &mp_type_memoryview)) {
src_items = (uint8_t*)src_items + (src_slice->memview_offset * item_sz);
src_items = (uint8_t *)src_items + (src_slice->memview_offset * item_sz);
}
#endif
} else if (mp_obj_is_type(value, &mp_type_bytes)) {
@@ -423,12 +460,12 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value
src_len = bufinfo.len;
src_items = bufinfo.buf;
} else {
mp_raise_NotImplementedError("array/bytes required on right side");
mp_raise_NotImplementedError(MP_ERROR_TEXT("array/bytes required on right side"));
}
// TODO: check src/dst compat
mp_int_t len_adj = src_len - (slice.stop - slice.start);
uint8_t* dest_items = o->items;
uint8_t *dest_items = o->items;
#if MICROPY_PY_BUILTINS_MEMORYVIEW
if (o->base.type == &mp_type_memoryview) {
if (!(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW)) {
@@ -442,7 +479,7 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value
}
#endif
if (len_adj > 0) {
if (len_adj > o->free) {
if ((size_t)len_adj > o->free) {
// TODO: alloc policy; at the moment we go conservative
o->items = m_renew(byte, o->items, (o->len + o->free) * item_sz, (o->len + len_adj) * item_sz);
o->free = len_adj;
@@ -479,11 +516,11 @@ STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value
#endif
{
res = array_new(o->typecode, slice.stop - slice.start);
memcpy(res->items, (uint8_t*)o->items + slice.start * sz, (slice.stop - slice.start) * sz);
memcpy(res->items, (uint8_t *)o->items + slice.start * sz, (slice.stop - slice.start) * sz);
}
return MP_OBJ_FROM_PTR(res);
} else
#endif
#endif
{
size_t index = mp_get_index(o->base.type, o->len, index_in, false);
#if MICROPY_PY_BUILTINS_MEMORYVIEW
@@ -519,7 +556,7 @@ STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_ui
// read-only memoryview
return 1;
}
bufinfo->buf = (uint8_t*)bufinfo->buf + (size_t)o->memview_offset * sz;
bufinfo->buf = (uint8_t *)bufinfo->buf + (size_t)o->memview_offset * sz;
}
#else
(void)flags;
@@ -550,13 +587,14 @@ const mp_obj_type_t mp_type_array = {
.binary_op = array_binary_op,
.subscr = array_subscr,
.buffer_p = { .get_buffer = array_get_buffer },
.locals_dict = (mp_obj_dict_t*)&array_locals_dict,
.locals_dict = (mp_obj_dict_t *)&array_locals_dict,
};
#endif
#if MICROPY_PY_BUILTINS_BYTEARRAY
const mp_obj_type_t mp_type_bytearray = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE,
.name = MP_QSTR_bytearray,
.print = array_print,
.make_new = bytearray_make_new,
@@ -565,13 +603,14 @@ const mp_obj_type_t mp_type_bytearray = {
.binary_op = array_binary_op,
.subscr = array_subscr,
.buffer_p = { .get_buffer = array_get_buffer },
.locals_dict = (mp_obj_dict_t*)&array_locals_dict,
.locals_dict = (mp_obj_dict_t *)&array_locals_dict,
};
#endif
#if MICROPY_PY_BUILTINS_MEMORYVIEW
const mp_obj_type_t mp_type_memoryview = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE,
.name = MP_QSTR_memoryview,
.make_new = memoryview_make_new,
.getiter = array_iterator_new,
@@ -629,7 +668,7 @@ STATIC mp_obj_t array_it_iternext(mp_obj_t self_in) {
}
}
STATIC const mp_obj_type_t array_it_type = {
STATIC const mp_obj_type_t mp_type_array_it = {
{ &mp_type_type },
.name = MP_QSTR_iterator,
.getiter = mp_identity_getiter,
@@ -639,8 +678,8 @@ STATIC const mp_obj_type_t array_it_type = {
STATIC mp_obj_t array_iterator_new(mp_obj_t array_in, mp_obj_iter_buf_t *iter_buf) {
assert(sizeof(mp_obj_array_t) <= sizeof(mp_obj_iter_buf_t));
mp_obj_array_t *array = MP_OBJ_TO_PTR(array_in);
mp_obj_array_it_t *o = (mp_obj_array_it_t*)iter_buf;
o->base.type = &array_it_type;
mp_obj_array_it_t *o = (mp_obj_array_it_t *)iter_buf;
o->base.type = &mp_type_array_it;
o->array = array;
o->offset = 0;
o->cur = 0;

View File

@@ -49,4 +49,14 @@ typedef struct _mp_obj_array_t {
void *items;
} mp_obj_array_t;
#if MICROPY_PY_BUILTINS_MEMORYVIEW
static inline void mp_obj_memoryview_init(mp_obj_array_t *self, size_t typecode, size_t offset, size_t len, void *items) {
self->base.type = &mp_type_memoryview;
self->typecode = typecode;
self->free = offset;
self->len = len;
self->items = items;
}
#endif
#endif // MICROPY_INCLUDED_PY_OBJARRAY_H

View File

@@ -51,7 +51,7 @@ void mp_obj_attrtuple_print_helper(const mp_print_t *print, const qstr *fields,
STATIC void mp_obj_attrtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
(void)kind;
mp_obj_tuple_t *o = MP_OBJ_TO_PTR(o_in);
const qstr *fields = (const qstr*)MP_OBJ_TO_PTR(o->items[o->len]);
const qstr *fields = (const qstr *)MP_OBJ_TO_PTR(o->items[o->len]);
mp_obj_attrtuple_print_helper(print, fields, o);
}
@@ -60,7 +60,7 @@ STATIC void mp_obj_attrtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
// load attribute
mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
size_t len = self->len;
const qstr *fields = (const qstr*)MP_OBJ_TO_PTR(self->items[len]);
const qstr *fields = (const qstr *)MP_OBJ_TO_PTR(self->items[len]);
for (size_t i = 0; i < len; i++) {
if (fields[i] == attr) {
dest[0] = self->items[i];

View File

@@ -28,21 +28,31 @@
#include "py/runtime.h"
#if MICROPY_OBJ_IMMEDIATE_OBJS
#define BOOL_VALUE(o) ((o) == mp_const_false ? 0 : 1)
#else
#define BOOL_VALUE(o) (((mp_obj_bool_t *)MP_OBJ_TO_PTR(o))->value)
typedef struct _mp_obj_bool_t {
mp_obj_base_t base;
bool value;
} mp_obj_bool_t;
#endif
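// Illustrative note (not part of the diff): with MICROPY_OBJ_IMMEDIATE_OBJS enabled,
// True and False are tagged words rather than heap objects, so BOOL_VALUE(o) reduces to
// a comparison against mp_const_false; otherwise it reads the boxed mp_obj_bool_t.
// Either way the handlers below only ever see a plain C bool.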
STATIC void bool_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
mp_obj_bool_t *self = MP_OBJ_TO_PTR(self_in);
bool value = BOOL_VALUE(self_in);
if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
if (self->value) {
if (value) {
mp_print_str(print, "true");
} else {
mp_print_str(print, "false");
}
} else {
if (self->value) {
if (value) {
mp_print_str(print, "True");
} else {
mp_print_str(print, "False");
@@ -65,17 +75,18 @@ STATIC mp_obj_t bool_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
if (op == MP_UNARY_OP_LEN) {
return MP_OBJ_NULL;
}
mp_obj_bool_t *self = MP_OBJ_TO_PTR(o_in);
return mp_unary_op(op, MP_OBJ_NEW_SMALL_INT(self->value));
bool value = BOOL_VALUE(o_in);
return mp_unary_op(op, MP_OBJ_NEW_SMALL_INT(value));
}
STATIC mp_obj_t bool_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
mp_obj_bool_t *self = MP_OBJ_TO_PTR(lhs_in);
return mp_binary_op(op, MP_OBJ_NEW_SMALL_INT(self->value), rhs_in);
bool value = BOOL_VALUE(lhs_in);
return mp_binary_op(op, MP_OBJ_NEW_SMALL_INT(value), rhs_in);
}
const mp_obj_type_t mp_type_bool = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE, // can match all numeric types
.name = MP_QSTR_bool,
.print = bool_print,
.make_new = bool_make_new,
@@ -83,5 +94,7 @@ const mp_obj_type_t mp_type_bool = {
.binary_op = bool_binary_op,
};
#if !MICROPY_OBJ_IMMEDIATE_OBJS
const mp_obj_bool_t mp_const_false_obj = {{&mp_type_bool}, false};
const mp_obj_bool_t mp_const_true_obj = {{&mp_type_bool}, true};
#endif

View File

@@ -98,13 +98,13 @@ STATIC void bound_meth_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
STATIC const mp_obj_type_t mp_type_bound_meth = {
{ &mp_type_type },
.name = MP_QSTR_bound_method,
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
.print = bound_meth_print,
#endif
#endif
.call = bound_meth_call,
#if MICROPY_PY_FUNCTION_ATTRS
#if MICROPY_PY_FUNCTION_ATTRS
.attr = bound_meth_attr,
#endif
#endif
};
mp_obj_t mp_obj_new_bound_meth(mp_obj_t meth, mp_obj_t self) {

View File

@@ -58,9 +58,9 @@ STATIC void cell_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t k
STATIC const mp_obj_type_t mp_type_cell = {
{ &mp_type_type },
.name = MP_QSTR_, // cell representation is just value in < >
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
.print = cell_print,
#endif
#endif
};
mp_obj_t mp_obj_new_cell(mp_obj_t obj) {

View File

@@ -78,18 +78,19 @@ STATIC void closure_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_
}
#endif
const mp_obj_type_t closure_type = {
const mp_obj_type_t mp_type_closure = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_closure,
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
.print = closure_print,
#endif
#endif
.call = closure_call,
};
mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed_over, const mp_obj_t *closed) {
mp_obj_closure_t *o = m_new_obj_var(mp_obj_closure_t, mp_obj_t, n_closed_over);
o->base.type = &closure_type;
o->base.type = &mp_type_closure;
o->fun = fun;
o->n_closed = n_closed_over;
memcpy(o->closed, closed, n_closed_over * sizeof(mp_obj_t));

View File

@@ -45,17 +45,17 @@ typedef struct _mp_obj_complex_t {
STATIC void complex_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
(void)kind;
mp_obj_complex_t *o = MP_OBJ_TO_PTR(o_in);
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
char buf[16];
#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
const int precision = 6;
#else
const int precision = 7;
#endif
#else
#else
char buf[32];
const int precision = 16;
#endif
#endif
if (o->real == 0) {
mp_format_float(o->imag, buf, sizeof(buf), 'g', precision, '\0');
mp_printf(print, "%sj", buf);
@@ -117,13 +117,18 @@ STATIC mp_obj_t complex_make_new(const mp_obj_type_t *type_in, size_t n_args, si
STATIC mp_obj_t complex_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
mp_obj_complex_t *o = MP_OBJ_TO_PTR(o_in);
switch (op) {
case MP_UNARY_OP_BOOL: return mp_obj_new_bool(o->real != 0 || o->imag != 0);
case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT(mp_float_hash(o->real) ^ mp_float_hash(o->imag));
case MP_UNARY_OP_POSITIVE: return o_in;
case MP_UNARY_OP_NEGATIVE: return mp_obj_new_complex(-o->real, -o->imag);
case MP_UNARY_OP_BOOL:
return mp_obj_new_bool(o->real != 0 || o->imag != 0);
case MP_UNARY_OP_HASH:
return MP_OBJ_NEW_SMALL_INT(mp_float_hash(o->real) ^ mp_float_hash(o->imag));
case MP_UNARY_OP_POSITIVE:
return o_in;
case MP_UNARY_OP_NEGATIVE:
return mp_obj_new_complex(-o->real, -o->imag);
case MP_UNARY_OP_ABS:
return mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(o->real*o->real + o->imag*o->imag));
default: return MP_OBJ_NULL; // op not supported
return mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(o->real * o->real + o->imag * o->imag));
default:
return MP_OBJ_NULL; // op not supported
}
}
@@ -147,6 +152,7 @@ STATIC void complex_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
const mp_obj_type_t mp_type_complex = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_EQ_NOT_REFLEXIVE | MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE,
.name = MP_QSTR_complex,
.print = complex_print,
.make_new = complex_make_new,
@@ -172,7 +178,10 @@ void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag) {
mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_float_t lhs_imag, mp_obj_t rhs_in) {
mp_float_t rhs_real, rhs_imag;
mp_obj_get_complex(rhs_in, &rhs_real, &rhs_imag); // can be any type, this function will convert to float (if possible)
if (!mp_obj_get_complex_maybe(rhs_in, &rhs_real, &rhs_imag)) {
return MP_OBJ_NULL; // op not supported
}
switch (op) {
case MP_BINARY_OP_ADD:
case MP_BINARY_OP_INPLACE_ADD:
@@ -187,7 +196,7 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo
case MP_BINARY_OP_MULTIPLY:
case MP_BINARY_OP_INPLACE_MULTIPLY: {
mp_float_t real;
multiply:
multiply:
real = lhs_real * rhs_real - lhs_imag * rhs_imag;
lhs_imag = lhs_real * rhs_imag + lhs_imag * rhs_real;
lhs_real = real;
@@ -195,13 +204,13 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo
}
case MP_BINARY_OP_FLOOR_DIVIDE:
case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
mp_raise_TypeError("can't truncate-divide a complex number");
mp_raise_TypeError(MP_ERROR_TEXT("can't truncate-divide a complex number"));
case MP_BINARY_OP_TRUE_DIVIDE:
case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
if (rhs_imag == 0) {
if (rhs_real == 0) {
mp_raise_msg(&mp_type_ZeroDivisionError, "complex divide by zero");
mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("complex divide by zero"));
}
lhs_real /= rhs_real;
lhs_imag /= rhs_real;
@@ -210,7 +219,7 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo
lhs_imag = -lhs_real / rhs_imag;
lhs_real = real;
} else {
mp_float_t rhs_len_sq = rhs_real*rhs_real + rhs_imag*rhs_imag;
mp_float_t rhs_len_sq = rhs_real * rhs_real + rhs_imag * rhs_imag;
rhs_real /= rhs_len_sq;
rhs_imag /= -rhs_len_sq;
goto multiply;
@@ -224,12 +233,12 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo
// = exp( (x2*ln1 - y2*arg1) + i*(y2*ln1 + x2*arg1) )
// = exp(x3 + i*y3)
// = exp(x3)*(cos(y3) + i*sin(y3))
mp_float_t abs1 = MICROPY_FLOAT_C_FUN(sqrt)(lhs_real*lhs_real + lhs_imag*lhs_imag);
mp_float_t abs1 = MICROPY_FLOAT_C_FUN(sqrt)(lhs_real * lhs_real + lhs_imag * lhs_imag);
if (abs1 == 0) {
if (rhs_imag == 0 && rhs_real >= 0) {
lhs_real = (rhs_real == 0);
} else {
mp_raise_msg(&mp_type_ZeroDivisionError, "0.0 to a complex power");
mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("0.0 to a complex power"));
}
} else {
mp_float_t ln1 = MICROPY_FLOAT_C_FUN(log)(abs1);
@@ -243,7 +252,8 @@ mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_flo
break;
}
case MP_BINARY_OP_EQUAL: return mp_obj_new_bool(lhs_real == rhs_real && lhs_imag == rhs_imag);
case MP_BINARY_OP_EQUAL:
return mp_obj_new_bool(lhs_real == rhs_real && lhs_imag == rhs_imag);
default:
return MP_OBJ_NULL; // op not supported

View File

@@ -102,7 +102,7 @@ STATIC mp_obj_t mp_obj_deque_append(mp_obj_t self_in, mp_obj_t arg) {
}
if (self->flags & FLAG_CHECK_OVERFLOW && new_i_put == self->i_get) {
mp_raise_msg(&mp_type_IndexError, "full");
mp_raise_msg(&mp_type_IndexError, MP_ERROR_TEXT("full"));
}
self->items[self->i_put] = arg;
@@ -122,7 +122,7 @@ STATIC mp_obj_t deque_popleft(mp_obj_t self_in) {
mp_obj_deque_t *self = MP_OBJ_TO_PTR(self_in);
if (self->i_get == self->i_put) {
mp_raise_msg(&mp_type_IndexError, "empty");
mp_raise_msg(&mp_type_IndexError, MP_ERROR_TEXT("empty"));
}
mp_obj_t ret = self->items[self->i_get];
@@ -161,7 +161,7 @@ const mp_obj_type_t mp_type_deque = {
.name = MP_QSTR_deque,
.make_new = deque_make_new,
.unary_op = deque_unary_op,
.locals_dict = (mp_obj_dict_t*)&deque_locals_dict,
.locals_dict = (mp_obj_dict_t *)&deque_locals_dict,
};
#endif // MICROPY_PY_COLLECTIONS_DEQUE

View File

@@ -33,7 +33,17 @@
#include "py/objtype.h"
#include "py/objstr.h"
#define mp_obj_is_dict_type(o) (mp_obj_is_obj(o) && ((mp_obj_base_t*)MP_OBJ_TO_PTR(o))->type->make_new == dict_make_new)
const mp_obj_dict_t mp_const_empty_dict_obj = {
.base = { .type = &mp_type_dict },
.map = {
.all_keys_are_qstrs = 0,
.is_fixed = 1,
.is_ordered = 1,
.used = 0,
.alloc = 0,
.table = NULL,
}
};
STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
@@ -44,21 +54,30 @@ STATIC mp_map_elem_t *dict_iter_next(mp_obj_dict_t *dict, size_t *cur) {
size_t max = dict->map.alloc;
mp_map_t *map = &dict->map;
for (size_t i = *cur; i < max; i++) {
size_t i = *cur;
for (; i < max; i++) {
if (mp_map_slot_is_filled(map, i)) {
*cur = i + 1;
return &(map->table[i]);
}
}
assert(map->used == 0 || i == max);
return NULL;
}
STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
bool first = true;
const char *item_separator = ", ";
const char *key_separator = ": ";
if (!(MICROPY_PY_UJSON && kind == PRINT_JSON)) {
kind = PRINT_REPR;
} else {
#if MICROPY_PY_UJSON_SEPARATORS
item_separator = MP_PRINT_GET_EXT(print)->item_separator;
key_separator = MP_PRINT_GET_EXT(print)->key_separator;
#endif
}
if (MICROPY_PY_COLLECTIONS_ORDEREDDICT && self->base.type != &mp_type_dict && kind != PRINT_JSON) {
mp_printf(print, "%q(", self->base.type->name);
@@ -68,7 +87,7 @@ STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_
mp_map_elem_t *next = NULL;
while ((next = dict_iter_next(self, &cur)) != NULL) {
if (!first) {
mp_print_str(print, ", ");
mp_print_str(print, item_separator);
}
first = false;
bool add_quote = MICROPY_PY_UJSON && kind == PRINT_JSON && !mp_obj_is_str_or_bytes(next->key);
@@ -79,7 +98,7 @@ STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_
if (add_quote) {
mp_print_str(print, "\"");
}
mp_print_str(print, ": ");
mp_print_str(print, key_separator);
mp_obj_print_helper(print, next->value, kind);
}
mp_print_str(print, "}");
@@ -88,7 +107,7 @@ STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_
}
}
STATIC mp_obj_t dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_obj_t mp_obj_dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
mp_obj_t dict_out = mp_obj_new_dict(0);
mp_obj_dict_t *dict = MP_OBJ_TO_PTR(dict_out);
dict->base.type = type;
@@ -109,15 +128,18 @@ STATIC mp_obj_t dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
STATIC mp_obj_t dict_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
switch (op) {
case MP_UNARY_OP_BOOL: return mp_obj_new_bool(self->map.used != 0);
case MP_UNARY_OP_LEN: return MP_OBJ_NEW_SMALL_INT(self->map.used);
case MP_UNARY_OP_BOOL:
return mp_obj_new_bool(self->map.used != 0);
case MP_UNARY_OP_LEN:
return MP_OBJ_NEW_SMALL_INT(self->map.used);
#if MICROPY_PY_SYS_GETSIZEOF
case MP_UNARY_OP_SIZEOF: {
size_t sz = sizeof(*self) + sizeof(*self->map.table) * self->map.alloc;
return MP_OBJ_NEW_SMALL_INT(sz);
}
#endif
default: return MP_OBJ_NULL; // op not supported
default:
return MP_OBJ_NULL; // op not supported
}
}
@@ -141,8 +163,9 @@ STATIC mp_obj_t dict_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_
}
}
return e1 == NULL && e2 == NULL ? mp_const_true : mp_const_false;
} else
}
#endif
if (mp_obj_is_type(rhs_in, &mp_type_dict)) {
mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in);
if (o->map.used != rhs->map.used) {
@@ -174,7 +197,7 @@ mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index) {
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_map_elem_t *elem = mp_map_lookup(&self->map, index, MP_MAP_LOOKUP);
if (elem == NULL) {
nlr_raise(mp_obj_new_exception_arg1(&mp_type_KeyError, index));
mp_raise_type_arg(&mp_type_KeyError, index);
} else {
return elem->value;
}
@@ -190,7 +213,7 @@ STATIC mp_obj_t dict_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_map_elem_t *elem = mp_map_lookup(&self->map, index, MP_MAP_LOOKUP);
if (elem == NULL) {
nlr_raise(mp_obj_new_exception_arg1(&mp_type_KeyError, index));
mp_raise_type_arg(&mp_type_KeyError, index);
} else {
return elem->value;
}
@@ -211,7 +234,7 @@ STATIC void mp_ensure_not_fixed(const mp_obj_dict_t *dict) {
}
STATIC mp_obj_t dict_clear(mp_obj_t self_in) {
mp_check_self(mp_obj_is_dict_type(self_in));
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_ensure_not_fixed(self);
@@ -221,8 +244,8 @@ STATIC mp_obj_t dict_clear(mp_obj_t self_in) {
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_clear_obj, dict_clear);
STATIC mp_obj_t dict_copy(mp_obj_t self_in) {
mp_check_self(mp_obj_is_dict_type(self_in));
mp_obj_t mp_obj_dict_copy(mp_obj_t self_in) {
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_obj_t other_out = mp_obj_new_dict(self->map.alloc);
mp_obj_dict_t *other = MP_OBJ_TO_PTR(other_out);
@@ -234,7 +257,7 @@ STATIC mp_obj_t dict_copy(mp_obj_t self_in) {
memcpy(other->map.table, self->map.table, self->map.alloc * sizeof(mp_map_elem_t));
return other_out;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_copy_obj, dict_copy);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_copy_obj, mp_obj_dict_copy);
#if MICROPY_PY_BUILTINS_DICT_FROMKEYS
// this is a classmethod
@@ -269,7 +292,7 @@ STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(dict_fromkeys_obj, MP_ROM_PTR(&dict_fromk
#endif
STATIC mp_obj_t dict_get_helper(size_t n_args, const mp_obj_t *args, mp_map_lookup_kind_t lookup_kind) {
mp_check_self(mp_obj_is_dict_type(args[0]));
mp_check_self(mp_obj_is_dict_or_ordereddict(args[0]));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]);
if (lookup_kind != MP_MAP_LOOKUP) {
mp_ensure_not_fixed(self);
@@ -279,7 +302,7 @@ STATIC mp_obj_t dict_get_helper(size_t n_args, const mp_obj_t *args, mp_map_look
if (elem == NULL || elem->value == MP_OBJ_NULL) {
if (n_args == 2) {
if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
nlr_raise(mp_obj_new_exception_arg1(&mp_type_KeyError, args[1]));
mp_raise_type_arg(&mp_type_KeyError, args[1]);
} else {
value = mp_const_none;
}
@@ -314,14 +337,20 @@ STATIC mp_obj_t dict_setdefault(size_t n_args, const mp_obj_t *args) {
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_setdefault_obj, 2, 3, dict_setdefault);
STATIC mp_obj_t dict_popitem(mp_obj_t self_in) {
mp_check_self(mp_obj_is_dict_type(self_in));
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_ensure_not_fixed(self);
size_t cur = 0;
mp_map_elem_t *next = dict_iter_next(self, &cur);
if (next == NULL) {
mp_raise_msg(&mp_type_KeyError, "popitem(): dictionary is empty");
if (self->map.used == 0) {
mp_raise_msg(&mp_type_KeyError, MP_ERROR_TEXT("popitem(): dictionary is empty"));
}
size_t cur = 0;
#if MICROPY_PY_COLLECTIONS_ORDEREDDICT
if (self->map.is_ordered) {
cur = self->map.used - 1;
}
#endif
mp_map_elem_t *next = dict_iter_next(self, &cur);
assert(next);
self->map.used--;
mp_obj_t items[] = {next->key, next->value};
next->key = MP_OBJ_SENTINEL; // must mark key as sentinel to indicate that it was deleted
@@ -333,7 +362,7 @@ STATIC mp_obj_t dict_popitem(mp_obj_t self_in) {
STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_popitem_obj, dict_popitem);
STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
mp_check_self(mp_obj_is_dict_type(args[0]));
mp_check_self(mp_obj_is_dict_or_ordereddict(args[0]));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]);
mp_ensure_not_fixed(self);
@@ -342,12 +371,12 @@ STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwarg
if (n_args == 2) {
// given a positional argument
if (mp_obj_is_dict_type(args[1])) {
if (mp_obj_is_dict_or_ordereddict(args[1])) {
// update from other dictionary (make sure other is not self)
if (args[1] != args[0]) {
size_t cur = 0;
mp_map_elem_t *elem = NULL;
while ((elem = dict_iter_next((mp_obj_dict_t*)MP_OBJ_TO_PTR(args[1]), &cur)) != NULL) {
while ((elem = dict_iter_next((mp_obj_dict_t *)MP_OBJ_TO_PTR(args[1]), &cur)) != NULL) {
mp_map_lookup(&self->map, elem->key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = elem->value;
}
}
@@ -363,7 +392,7 @@ STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwarg
if (key == MP_OBJ_STOP_ITERATION
|| value == MP_OBJ_STOP_ITERATION
|| stop != MP_OBJ_STOP_ITERATION) {
mp_raise_ValueError("dict update sequence has wrong length");
mp_raise_ValueError(MP_ERROR_TEXT("dict update sequence has wrong length"));
} else {
mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
}
@@ -386,8 +415,8 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_KW(dict_update_obj, 1, dict_update);
/******************************************************************************/
/* dict views */
STATIC const mp_obj_type_t dict_view_type;
STATIC const mp_obj_type_t dict_view_it_type;
STATIC const mp_obj_type_t mp_type_dict_view;
STATIC const mp_obj_type_t mp_type_dict_view_it;
typedef enum _mp_dict_view_kind_t {
MP_DICT_VIEW_ITEMS,
@@ -411,7 +440,7 @@ typedef struct _mp_obj_dict_view_t {
} mp_obj_dict_view_t;
STATIC mp_obj_t dict_view_it_iternext(mp_obj_t self_in) {
mp_check_self(mp_obj_is_type(self_in, &dict_view_it_type));
mp_check_self(mp_obj_is_type(self_in, &mp_type_dict_view_it));
mp_obj_dict_view_it_t *self = MP_OBJ_TO_PTR(self_in);
mp_map_elem_t *next = dict_iter_next(MP_OBJ_TO_PTR(self->dict), &self->cur);
@@ -432,7 +461,7 @@ STATIC mp_obj_t dict_view_it_iternext(mp_obj_t self_in) {
}
}
STATIC const mp_obj_type_t dict_view_it_type = {
STATIC const mp_obj_type_t mp_type_dict_view_it = {
{ &mp_type_type },
.name = MP_QSTR_iterator,
.getiter = mp_identity_getiter,
@@ -441,10 +470,10 @@ STATIC const mp_obj_type_t dict_view_it_type = {
STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in, mp_obj_iter_buf_t *iter_buf) {
assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t));
mp_check_self(mp_obj_is_type(view_in, &dict_view_type));
mp_check_self(mp_obj_is_type(view_in, &mp_type_dict_view));
mp_obj_dict_view_t *view = MP_OBJ_TO_PTR(view_in);
mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t*)iter_buf;
o->base.type = &dict_view_it_type;
mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t *)iter_buf;
o->base.type = &mp_type_dict_view_it;
o->kind = view->kind;
o->dict = view->dict;
o->cur = 0;
@@ -453,7 +482,7 @@ STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in, mp_obj_iter_buf_t *iter_buf)
STATIC void dict_view_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
(void)kind;
mp_check_self(mp_obj_is_type(self_in, &dict_view_type));
mp_check_self(mp_obj_is_type(self_in, &mp_type_dict_view));
mp_obj_dict_view_t *self = MP_OBJ_TO_PTR(self_in);
bool first = true;
mp_print_str(print, mp_dict_view_names[self->kind]);
@@ -483,7 +512,7 @@ STATIC mp_obj_t dict_view_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t
return dict_binary_op(op, o->dict, rhs_in);
}
STATIC const mp_obj_type_t dict_view_type = {
STATIC const mp_obj_type_t mp_type_dict_view = {
{ &mp_type_type },
.name = MP_QSTR_dict_view,
.print = dict_view_print,
@@ -493,14 +522,14 @@ STATIC const mp_obj_type_t dict_view_type = {
STATIC mp_obj_t mp_obj_new_dict_view(mp_obj_t dict, mp_dict_view_kind_t kind) {
mp_obj_dict_view_t *o = m_new_obj(mp_obj_dict_view_t);
o->base.type = &dict_view_type;
o->base.type = &mp_type_dict_view;
o->dict = dict;
o->kind = kind;
return MP_OBJ_FROM_PTR(o);
}
STATIC mp_obj_t dict_view(mp_obj_t self_in, mp_dict_view_kind_t kind) {
mp_check_self(mp_obj_is_dict_type(self_in));
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
return mp_obj_new_dict_view(self_in, kind);
}
@@ -524,9 +553,9 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_values_obj, dict_values);
STATIC mp_obj_t dict_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf) {
assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t));
mp_check_self(mp_obj_is_dict_type(self_in));
mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t*)iter_buf;
o->base.type = &dict_view_it_type;
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t *)iter_buf;
o->base.type = &mp_type_dict_view_it;
o->kind = MP_DICT_VIEW_KEYS;
o->dict = self_in;
o->cur = 0;
@@ -561,12 +590,12 @@ const mp_obj_type_t mp_type_dict = {
{ &mp_type_type },
.name = MP_QSTR_dict,
.print = dict_print,
.make_new = dict_make_new,
.make_new = mp_obj_dict_make_new,
.unary_op = dict_unary_op,
.binary_op = dict_binary_op,
.subscr = dict_subscr,
.getiter = dict_getiter,
.locals_dict = (mp_obj_dict_t*)&dict_locals_dict,
.locals_dict = (mp_obj_dict_t *)&dict_locals_dict,
};
#if MICROPY_PY_COLLECTIONS_ORDEREDDICT
@@ -574,13 +603,13 @@ const mp_obj_type_t mp_type_ordereddict = {
{ &mp_type_type },
.name = MP_QSTR_OrderedDict,
.print = dict_print,
.make_new = dict_make_new,
.make_new = mp_obj_dict_make_new,
.unary_op = dict_unary_op,
.binary_op = dict_binary_op,
.subscr = dict_subscr,
.getiter = dict_getiter,
.parent = &mp_type_dict,
.locals_dict = (mp_obj_dict_t*)&dict_locals_dict,
.locals_dict = (mp_obj_dict_t *)&dict_locals_dict,
};
#endif
@@ -601,7 +630,7 @@ size_t mp_obj_dict_len(mp_obj_t self_in) {
}
mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value) {
mp_check_self(mp_obj_is_dict_type(self_in));
mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
mp_ensure_not_fixed(self);
mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;



@@ -40,7 +40,7 @@ typedef struct _mp_obj_enumerate_t {
STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in);
STATIC mp_obj_t enumerate_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
#if MICROPY_CPYTHON_COMPAT
#if MICROPY_CPYTHON_COMPAT
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_iterable, MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} },
{ MP_QSTR_start, MP_ARG_INT, {.u_int = 0} },
@@ -51,20 +51,20 @@ STATIC mp_obj_t enumerate_make_new(const mp_obj_type_t *type, size_t n_args, siz
mp_arg_val_t iterable, start;
} arg_vals;
mp_arg_parse_all_kw_array(n_args, n_kw, args,
MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t*)&arg_vals);
MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t *)&arg_vals);
// create enumerate object
mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
o->base.type = type;
o->iter = mp_getiter(arg_vals.iterable.u_obj, NULL);
o->cur = arg_vals.start.u_int;
#else
#else
mp_arg_check_num(n_args, n_kw, 1, 2, false);
mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
o->base.type = type;
o->iter = mp_getiter(args[0], NULL);
o->cur = n_args > 1 ? mp_obj_get_int(args[1]) : 0;
#endif
#endif
return MP_OBJ_FROM_PTR(o);
}
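
enumerate_make_new parses its arguments into a small struct whose fields line up one-to-one with the allowed_args[] table and then treats that struct as an array of mp_arg_val_t. The following self-contained sketch shows only that layout trick; parse_args() and arg_val_t are simplified stand-ins, not the real mp_arg_* API.

#include <stdio.h>

// Stand-in for mp_arg_val_t: one slot per declared argument.
typedef union { long u_int; const char *u_obj; } arg_val_t;

// Fill one slot per allowed argument, falling back to a default when the
// caller supplied fewer values (keyword handling is omitted in this sketch).
static void parse_args(size_t n_given, const arg_val_t *given,
                       size_t n_allowed, const arg_val_t *defaults, arg_val_t *out) {
    for (size_t i = 0; i < n_allowed; i++) {
        out[i] = (i < n_given) ? given[i] : defaults[i];
    }
}

int main(void) {
    // Struct fields mirror the allowed_args[] order, so the struct can be
    // passed as an array of slots -- the same cast as in the code above.
    struct { arg_val_t iterable, start; } vals;
    arg_val_t defaults[] = { { .u_obj = NULL }, { .u_int = 0 } };
    arg_val_t given[] = { { .u_obj = "abc" } };  // enumerate("abc") with default start
    parse_args(1, given, 2, defaults, (arg_val_t *)&vals);
    printf("iterable=%s start=%ld\n", vals.iterable.u_obj, vals.start.u_int);
    return 0;
}
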


@@ -38,6 +38,16 @@
#include "py/gc.h"
#include "py/mperrno.h"
#if MICROPY_ROM_TEXT_COMPRESSION && !defined(NO_QSTR)
// Extract the MP_MAX_UNCOMPRESSED_TEXT_LEN macro from "genhdr/compressed.data.h".
// Only need this if compression enabled and in a regular build (i.e. not during QSTR extraction).
#define MP_MATCH_COMPRESSED(...) // Ignore
#define MP_COMPRESSED_DATA(...) // Ignore
#include "genhdr/compressed.data.h"
#undef MP_MATCH_COMPRESSED
#undef MP_COMPRESSED_DATA
#endif
// Number of items per traceback entry (file, line, block)
#define TRACEBACK_ENTRY_LEN (3)
@@ -57,8 +67,9 @@
#define EMG_BUF_TUPLE_OFFSET (EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE)
#define EMG_BUF_TUPLE_SIZE(n_args) (sizeof(mp_obj_tuple_t) + n_args * sizeof(mp_obj_t))
#define EMG_BUF_STR_OFFSET (EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(1))
#define EMG_BUF_STR_BUF_OFFSET (EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t))
# if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
#if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
#define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
void mp_init_emergency_exception_buf(void) {
@@ -100,6 +111,49 @@ mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
#endif
#endif // MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
STATIC mp_obj_exception_t *get_native_exception(mp_obj_t self_in) {
assert(mp_obj_is_exception_instance(self_in));
if (mp_obj_is_native_exception_instance(self_in)) {
return MP_OBJ_TO_PTR(self_in);
} else {
return MP_OBJ_TO_PTR(((mp_obj_instance_t *)MP_OBJ_TO_PTR(self_in))->subobj[0]);
}
}
STATIC void decompress_error_text_maybe(mp_obj_exception_t *o) {
#if MICROPY_ROM_TEXT_COMPRESSION
if (o->args->len == 1 && mp_obj_is_type(o->args->items[0], &mp_type_str)) {
mp_obj_str_t *o_str = MP_OBJ_TO_PTR(o->args->items[0]);
if (MP_IS_COMPRESSED_ROM_STRING(o_str->data)) {
byte *buf = m_new_maybe(byte, MP_MAX_UNCOMPRESSED_TEXT_LEN + 1);
if (!buf) {
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
// Try and use the emergency exception buf if enough space is available.
buf = (byte *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_STR_BUF_OFFSET);
size_t avail = (uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - buf;
if (avail < MP_MAX_UNCOMPRESSED_TEXT_LEN + 1) {
// No way to decompress, fallback to no message text.
o->args = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
return;
}
#else
o->args = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
return;
#endif
}
mp_decompress_rom_string(buf, (mp_rom_error_text_t)o_str->data);
o_str->data = buf;
o_str->len = strlen((const char *)buf);
o_str->hash = 0;
}
// Lazily compute the string hash.
if (o_str->hash == 0) {
o_str->hash = qstr_compute_hash(o_str->data, o_str->len);
}
}
#endif
}
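
decompress_error_text_maybe() defers two costs until an exception's message is actually inspected: decompressing the ROM text and computing the string hash, using hash == 0 as the "not computed yet" marker. Below is a standalone sketch of the lazy-hash part only; the hash function is a plain FNV-1a stand-in, not MicroPython's qstr hash.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    const char *data;
    size_t len;
    uint32_t hash;   // 0 means "not computed yet"
} lazy_str_t;

static uint32_t fnv1a(const char *s, size_t len) {
    uint32_t h = 2166136261u;
    for (size_t i = 0; i < len; i++) {
        h = (h ^ (uint8_t)s[i]) * 16777619u;
    }
    return h == 0 ? 1 : h;   // keep 0 reserved as the "unset" sentinel
}

static uint32_t lazy_str_hash(lazy_str_t *s) {
    if (s->hash == 0) {      // computed at most once, on first access
        s->hash = fnv1a(s->data, s->len);
    }
    return s->hash;
}

int main(void) {
    lazy_str_t msg = { "divide by zero", strlen("divide by zero"), 0 };
    printf("first access:  %08x\n", (unsigned)lazy_str_hash(&msg));
    printf("second access: %08x\n", (unsigned)lazy_str_hash(&msg));  // reuses the cached value
    return 0;
}
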
void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
mp_obj_exception_t *o = MP_OBJ_TO_PTR(o_in);
mp_print_kind_t k = kind & ~PRINT_EXC_SUBCLASS;
@@ -112,25 +166,35 @@ void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kin
mp_print_str(print, ": ");
}
decompress_error_text_maybe(o);
if (k == PRINT_STR || k == PRINT_EXC) {
if (o->args == NULL || o->args->len == 0) {
mp_print_str(print, "");
return;
} else if (o->args->len == 1) {
#if MICROPY_PY_UERRNO
// try to provide a nice OSError error message
if (o->base.type == &mp_type_OSError && mp_obj_is_small_int(o->args->items[0])) {
qstr qst = mp_errno_to_str(o->args->items[0]);
if (qst != MP_QSTRnull) {
mp_printf(print, "[Errno " INT_FMT "] %q", MP_OBJ_SMALL_INT_VALUE(o->args->items[0]), qst);
return;
}
#if MICROPY_PY_UERRNO
// try to provide a nice OSError error message
if (o->base.type == &mp_type_OSError && o->args->len > 0 && o->args->len < 3 && mp_obj_is_small_int(o->args->items[0])) {
qstr qst = mp_errno_to_str(o->args->items[0]);
if (qst != MP_QSTRnull) {
mp_printf(print, "[Errno " INT_FMT "] %q", MP_OBJ_SMALL_INT_VALUE(o->args->items[0]), qst);
if (o->args->len > 1) {
mp_print_str(print, ": ");
mp_obj_print_helper(print, o->args->items[1], PRINT_STR);
}
return;
}
#endif
}
#endif
if (o->args->len == 1) {
mp_obj_print_helper(print, o->args->items[0], PRINT_STR);
return;
}
}
mp_obj_tuple_print(print, MP_OBJ_FROM_PTR(o->args), kind);
}
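
The reworked print path formats OSError as "[Errno N] NAME" when the first of one or two arguments is a small integer, and appends ": <second argument>" when a message is also present. A self-contained sketch of that decision follows; errno_name() is a toy stand-in for mp_errno_to_str().

#include <stdio.h>

// Toy stand-in for mp_errno_to_str(): return a symbolic name or NULL.
static const char *errno_name(int err) {
    return err == 2 ? "ENOENT" : NULL;
}

// Mirrors the branch above: one or two args, the first a small integer.
static void print_oserror(int n_args, int err, const char *msg) {
    const char *name = errno_name(err);
    if (n_args >= 1 && n_args < 3 && name != NULL) {
        printf("[Errno %d] %s", err, name);
        if (n_args > 1) {
            printf(": %s", msg);
        }
        printf("\n");
        return;
    }
    printf("%d\n", err);   // fall back to printing the raw argument
}

int main(void) {
    print_oserror(1, 2, NULL);           // [Errno 2] ENOENT
    print_oserror(2, 2, "config.txt");   // [Errno 2] ENOENT: config.txt
    return 0;
}
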
@@ -150,7 +214,7 @@ mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, siz
mp_obj_tuple_t *o_tuple;
if (n_args == 0) {
// No args, can use the empty tuple straightaway
o_tuple = (mp_obj_tuple_t*)&mp_const_empty_tuple_obj;
o_tuple = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
} else {
// Try to allocate memory for the tuple containing the args
o_tuple = m_new_obj_var_maybe(mp_obj_tuple_t, mp_obj_t, n_args);
@@ -160,15 +224,15 @@ mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, siz
// reserved room (after the traceback data) for a tuple with 1 element.
// Otherwise we are free to use the whole buffer after the traceback data.
if (o_tuple == NULL && mp_emergency_exception_buf_size >=
EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(n_args)) {
o_tuple = (mp_obj_tuple_t*)
((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_TUPLE_OFFSET);
(mp_int_t)(EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(n_args))) {
o_tuple = (mp_obj_tuple_t *)
((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_TUPLE_OFFSET);
}
#endif
if (o_tuple == NULL) {
// No memory for a tuple, fallback to an empty tuple
o_tuple = (mp_obj_tuple_t*)&mp_const_empty_tuple_obj;
o_tuple = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
} else {
// Have memory for a tuple so populate it
o_tuple->base.type = &mp_type_tuple;
@@ -185,10 +249,11 @@ mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, siz
// Get exception "value" - that is, first argument, or None
mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in) {
mp_obj_exception_t *self = MP_OBJ_TO_PTR(self_in);
mp_obj_exception_t *self = get_native_exception(self_in);
if (self->args->len == 0) {
return mp_const_none;
} else {
decompress_error_text_maybe(self);
return self->args->items[0];
}
}
@@ -210,8 +275,11 @@ void mp_obj_exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
return;
}
if (attr == MP_QSTR_args) {
decompress_error_text_maybe(self);
dest[0] = MP_OBJ_FROM_PTR(self->args);
} else if (self->base.type == &mp_type_StopIteration && attr == MP_QSTR_value) {
} else if (attr == MP_QSTR_value || attr == MP_QSTR_errno) {
// These are aliases for args[0]: .value for StopIteration and .errno for OSError.
// For efficiency let these attributes apply to all exception instances.
dest[0] = mp_obj_exception_get_value(self_in);
}
}
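
With the attr change above, .value (used by StopIteration) and .errno (used by OSError) are both served as aliases of args[0], and for efficiency they resolve on every exception instance. A simplified sketch of that aliasing, with plain strings standing in for qstr attribute names:

#include <stdio.h>
#include <string.h>

typedef struct {
    size_t n_args;
    const char *args[4];
} exc_t;

// Stand-in for the attr handler: "value" and "errno" both map to args[0].
static const char *exc_getattr(const exc_t *e, const char *attr) {
    if (strcmp(attr, "value") == 0 || strcmp(attr, "errno") == 0) {
        return e->n_args ? e->args[0] : "None";
    }
    return NULL;   // attribute not found in this sketch
}

int main(void) {
    exc_t stop_iteration = { 1, { "42" } };
    printf("value=%s errno=%s\n",
           exc_getattr(&stop_iteration, "value"),
           exc_getattr(&stop_iteration, "errno"));
    return 0;
}
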
@@ -224,6 +292,8 @@ const mp_obj_type_t mp_type_BaseException = {
.attr = mp_obj_exception_attr,
};
// *FORMAT-OFF*
// List of all exceptions, arranged as in the table at:
// http://docs.python.org/3/library/exceptions.html
MP_DEFINE_EXCEPTION(SystemExit, BaseException)
@@ -241,10 +311,8 @@ MP_DEFINE_EXCEPTION(Exception, BaseException)
MP_DEFINE_EXCEPTION(AssertionError, Exception)
MP_DEFINE_EXCEPTION(AttributeError, Exception)
//MP_DEFINE_EXCEPTION(BufferError, Exception)
//MP_DEFINE_EXCEPTION(EnvironmentError, Exception) use OSError instead
MP_DEFINE_EXCEPTION(EOFError, Exception)
MP_DEFINE_EXCEPTION(ImportError, Exception)
//MP_DEFINE_EXCEPTION(IOError, Exception) use OSError instead
MP_DEFINE_EXCEPTION(LookupError, Exception)
MP_DEFINE_EXCEPTION(IndexError, LookupError)
MP_DEFINE_EXCEPTION(KeyError, LookupError)
@@ -254,9 +322,6 @@ MP_DEFINE_EXCEPTION(Exception, BaseException)
MP_DEFINE_EXCEPTION(UnboundLocalError, NameError)
*/
MP_DEFINE_EXCEPTION(OSError, Exception)
#if MICROPY_PY_BUILTINS_TIMEOUTERROR
MP_DEFINE_EXCEPTION(TimeoutError, OSError)
#endif
/*
MP_DEFINE_EXCEPTION(BlockingIOError, OSError)
MP_DEFINE_EXCEPTION(ChildProcessError, OSError)
@@ -270,6 +335,7 @@ MP_DEFINE_EXCEPTION(Exception, BaseException)
MP_DEFINE_EXCEPTION(NotADirectoryError, OSError)
MP_DEFINE_EXCEPTION(PermissionError, OSError)
MP_DEFINE_EXCEPTION(ProcessLookupError, OSError)
MP_DEFINE_EXCEPTION(TimeoutError, OSError)
MP_DEFINE_EXCEPTION(FileExistsError, OSError)
MP_DEFINE_EXCEPTION(FileNotFoundError, OSError)
MP_DEFINE_EXCEPTION(ReferenceError, Exception)
@@ -305,21 +371,21 @@ MP_DEFINE_EXCEPTION(Exception, BaseException)
MP_DEFINE_EXCEPTION(ResourceWarning, Warning)
*/
mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type) {
return mp_obj_new_exception_args(exc_type, 0, NULL);
}
// *FORMAT-ON*
// "Optimized" version for common(?) case of having 1 exception arg
mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg) {
return mp_obj_new_exception_args(exc_type, 1, &arg);
mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type) {
assert(exc_type->make_new == mp_obj_exception_make_new);
return mp_obj_exception_make_new(exc_type, 0, 0, NULL);
}
mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, size_t n_args, const mp_obj_t *args) {
assert(exc_type->make_new == mp_obj_exception_make_new);
return exc_type->make_new(exc_type, n_args, 0, args);
return mp_obj_exception_make_new(exc_type, n_args, 0, args);
}
mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg) {
#if MICROPY_ERROR_REPORTING != MICROPY_ERROR_REPORTING_NONE
mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, mp_rom_error_text_t msg) {
// Check that the given type is an exception type
assert(exc_type->make_new == mp_obj_exception_make_new);
@@ -331,8 +397,8 @@ mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg
// that buffer to store the string object, reserving room at the start for the
// traceback and 1-tuple.
if (o_str == NULL
&& mp_emergency_exception_buf_size >= EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t)) {
o_str = (mp_obj_str_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf)
&& mp_emergency_exception_buf_size >= (mp_int_t)(EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t))) {
o_str = (mp_obj_str_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+ EMG_BUF_STR_OFFSET);
}
#endif
@@ -344,9 +410,13 @@ mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const char *msg
// Create the string object and call mp_obj_exception_make_new to create the exception
o_str->base.type = &mp_type_str;
o_str->len = strlen(msg);
o_str->data = (const byte*)msg;
o_str->len = strlen((const char *)msg);
o_str->data = (const byte *)msg;
#if MICROPY_ROM_TEXT_COMPRESSION
o_str->hash = 0; // will be computed only if string object is accessed
#else
o_str->hash = qstr_compute_hash(o_str->data, o_str->len);
#endif
mp_obj_t arg = MP_OBJ_FROM_PTR(o_str);
return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
}
@@ -384,7 +454,15 @@ STATIC void exc_add_strn(void *data, const char *str, size_t len) {
pr->len += len;
}
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char *fmt, ...) {
mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, ...) {
va_list args;
va_start(args, fmt);
mp_obj_t exc = mp_obj_new_exception_msg_vlist(exc_type, fmt, args);
va_end(args);
return exc;
}
mp_obj_t mp_obj_new_exception_msg_vlist(const mp_obj_type_t *exc_type, mp_rom_error_text_t fmt, va_list args) {
assert(fmt != NULL);
// Check that the given type is an exception type
@@ -392,7 +470,7 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
// Try to allocate memory for the message
mp_obj_str_t *o_str = m_new_obj_maybe(mp_obj_str_t);
size_t o_str_alloc = strlen(fmt) + 1;
size_t o_str_alloc = strlen((const char *)fmt) + 1;
byte *o_str_buf = m_new_maybe(byte, o_str_alloc);
bool used_emg_buf = false;
@@ -401,34 +479,41 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
// that buffer to store the string object and its data (at least 16 bytes for
// the string data), reserving room at the start for the traceback and 1-tuple.
if ((o_str == NULL || o_str_buf == NULL)
&& mp_emergency_exception_buf_size >= EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t) + 16) {
&& mp_emergency_exception_buf_size >= (mp_int_t)(EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t) + 16)) {
used_emg_buf = true;
o_str = (mp_obj_str_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf)
+ EMG_BUF_STR_OFFSET);
o_str_buf = (byte*)&o_str[1];
o_str_alloc = (uint8_t*)MP_STATE_VM(mp_emergency_exception_buf)
+ mp_emergency_exception_buf_size - o_str_buf;
o_str = (mp_obj_str_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_STR_OFFSET);
o_str_buf = (byte *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_STR_BUF_OFFSET);
o_str_alloc = (uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + mp_emergency_exception_buf_size - o_str_buf;
}
#endif
if (o_str == NULL) {
// No memory for the string object so create the exception with no args
// No memory for the string object so create the exception with no args.
// The exception will only have a type and no message (compression is irrelevant).
return mp_obj_exception_make_new(exc_type, 0, 0, NULL);
}
if (o_str_buf == NULL) {
// No memory for the string buffer: assume that the fmt string is in ROM
// and use that data as the data of the string
// and use that data as the data of the string.
// The string will point directly to the compressed data -- will need to be decompressed
// prior to display (this case is identical to mp_obj_new_exception_msg above).
o_str->len = o_str_alloc - 1; // will be equal to strlen(fmt)
o_str->data = (const byte*)fmt;
o_str->data = (const byte *)fmt;
} else {
// We have some memory to format the string
// We have some memory to format the string.
// TODO: Optimise this to format-while-decompressing (and not require the temp stack space).
struct _exc_printer_t exc_pr = {!used_emg_buf, o_str_alloc, 0, o_str_buf};
mp_print_t print = {&exc_pr, exc_add_strn};
va_list ap;
va_start(ap, fmt);
mp_vprintf(&print, fmt, ap);
va_end(ap);
const char *fmt2 = (const char *)fmt;
#if MICROPY_ROM_TEXT_COMPRESSION
byte decompressed[MP_MAX_UNCOMPRESSED_TEXT_LEN];
if (MP_IS_COMPRESSED_ROM_STRING(fmt)) {
mp_decompress_rom_string(decompressed, fmt);
fmt2 = (const char *)decompressed;
}
#endif
mp_vprintf(&print, fmt2, args);
exc_pr.buf[exc_pr.len] = '\0';
o_str->len = exc_pr.len;
o_str->data = exc_pr.buf;
@@ -436,11 +521,17 @@ mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const char
// Create the string object and call mp_obj_exception_make_new to create the exception
o_str->base.type = &mp_type_str;
#if MICROPY_ROM_TEXT_COMPRESSION
o_str->hash = 0; // will be computed only if string object is accessed
#else
o_str->hash = qstr_compute_hash(o_str->data, o_str->len);
#endif
mp_obj_t arg = MP_OBJ_FROM_PTR(o_str);
return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
}
#endif
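
Both mp_obj_new_exception_msg() and the varg variant above fall back to carving the string object header and its character buffer out of the statically allocated emergency buffer when the heap is exhausted. The standalone sketch below shows only that offset arithmetic; the buffer size and offsets are illustrative values, not MicroPython's real EMG_BUF_* layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    size_t len;
    const uint8_t *data;
} str_obj_t;

#define EMG_BUF_SIZE    256
#define STR_OBJ_OFFSET  64                                   // after traceback/tuple space
#define STR_BUF_OFFSET  (STR_OBJ_OFFSET + sizeof(str_obj_t))

static uint8_t emergency_buf[EMG_BUF_SIZE];

int main(void) {
    // Carve the object header and its data area out of the fixed buffer.
    str_obj_t *o = (str_obj_t *)(emergency_buf + STR_OBJ_OFFSET);
    uint8_t *buf = emergency_buf + STR_BUF_OFFSET;
    size_t avail = (size_t)(emergency_buf + EMG_BUF_SIZE - buf);  // same pointer arithmetic as above

    const char *msg = "memory allocation failed";
    size_t n = strlen(msg);
    if (n > avail - 1) {
        n = avail - 1;          // truncate rather than overflow the static buffer
    }
    memcpy(buf, msg, n);
    buf[n] = '\0';
    o->len = n;
    o->data = buf;

    printf("stored \"%s\" (%zu bytes, %zu available)\n", (const char *)o->data, o->len, avail);
    return 0;
}
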
// return true if the given object is an exception type
bool mp_obj_is_exception_type(mp_obj_t self_in) {
if (mp_obj_is_type(self_in, &mp_type_type)) {
@@ -471,25 +562,15 @@ bool mp_obj_exception_match(mp_obj_t exc, mp_const_obj_t exc_type) {
// traceback handling functions
#define GET_NATIVE_EXCEPTION(self, self_in) \
/* make sure self_in is an exception instance */ \
assert(mp_obj_is_exception_instance(self_in)); \
mp_obj_exception_t *self; \
if (mp_obj_is_native_exception_instance(self_in)) { \
self = MP_OBJ_TO_PTR(self_in); \
} else { \
self = MP_OBJ_TO_PTR(((mp_obj_instance_t*)MP_OBJ_TO_PTR(self_in))->subobj[0]); \
}
void mp_obj_exception_clear_traceback(mp_obj_t self_in) {
GET_NATIVE_EXCEPTION(self, self_in);
mp_obj_exception_t *self = get_native_exception(self_in);
// just set the traceback to the null object
// we don't want to call any memory management functions here
self->traceback_data = NULL;
}
void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qstr block) {
GET_NATIVE_EXCEPTION(self, self_in);
mp_obj_exception_t *self = get_native_exception(self_in);
// append this traceback info to traceback data
// if memory allocation fails (eg because gc is locked), just return
@@ -498,9 +579,9 @@ void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qs
self->traceback_data = m_new_maybe(size_t, TRACEBACK_ENTRY_LEN);
if (self->traceback_data == NULL) {
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
if (mp_emergency_exception_buf_size >= EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE) {
if (mp_emergency_exception_buf_size >= (mp_int_t)(EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE)) {
// There is room in the emergency buffer for traceback data
size_t *tb = (size_t*)((uint8_t*)MP_STATE_VM(mp_emergency_exception_buf)
size_t *tb = (size_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+ EMG_BUF_TRACEBACK_OFFSET);
self->traceback_data = tb;
self->traceback_alloc = EMG_BUF_TRACEBACK_SIZE / sizeof(size_t);
@@ -519,7 +600,7 @@ void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qs
self->traceback_len = 0;
} else if (self->traceback_len + TRACEBACK_ENTRY_LEN > self->traceback_alloc) {
#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
if (self->traceback_data == (size_t*)MP_STATE_VM(mp_emergency_exception_buf)) {
if (self->traceback_data == (size_t *)MP_STATE_VM(mp_emergency_exception_buf)) {
// Can't resize the emergency buffer
return;
}
@@ -542,7 +623,7 @@ void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qs
}
void mp_obj_exception_get_traceback(mp_obj_t self_in, size_t *n, size_t **values) {
GET_NATIVE_EXCEPTION(self, self_in);
mp_obj_exception_t *self = get_native_exception(self_in);
if (self->traceback_data == NULL) {
*n = 0;


@@ -41,13 +41,13 @@ void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kin
void mp_obj_exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
#define MP_DEFINE_EXCEPTION(exc_name, base_name) \
const mp_obj_type_t mp_type_ ## exc_name = { \
{ &mp_type_type }, \
.name = MP_QSTR_ ## exc_name, \
.print = mp_obj_exception_print, \
.make_new = mp_obj_exception_make_new, \
.attr = mp_obj_exception_attr, \
.parent = &mp_type_ ## base_name, \
};
const mp_obj_type_t mp_type_##exc_name = { \
{ &mp_type_type }, \
.name = MP_QSTR_##exc_name, \
.print = mp_obj_exception_print, \
.make_new = mp_obj_exception_make_new, \
.attr = mp_obj_exception_attr, \
.parent = &mp_type_##base_name, \
};
#endif // MICROPY_INCLUDED_PY_OBJEXCEPT_H


@@ -52,29 +52,17 @@ typedef struct _mp_obj_float_t {
mp_float_t value;
} mp_obj_float_t;
const mp_obj_float_t mp_const_float_e_obj = {{&mp_type_float}, M_E};
const mp_obj_float_t mp_const_float_pi_obj = {{&mp_type_float}, M_PI};
const mp_obj_float_t mp_const_float_e_obj = {{&mp_type_float}, (mp_float_t)M_E};
const mp_obj_float_t mp_const_float_pi_obj = {{&mp_type_float}, (mp_float_t)M_PI};
#endif
#define MICROPY_FLOAT_ZERO MICROPY_FLOAT_CONST(0.0)
#if MICROPY_FLOAT_HIGH_QUALITY_HASH
// must return actual integer value if it fits in mp_int_t
mp_int_t mp_float_hash(mp_float_t src) {
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
typedef uint64_t mp_float_uint_t;
#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
typedef uint32_t mp_float_uint_t;
#endif
union {
mp_float_t f;
#if MP_ENDIANNESS_LITTLE
struct { mp_float_uint_t frc:MP_FLOAT_FRAC_BITS, exp:MP_FLOAT_EXP_BITS, sgn:1; } p;
#else
struct { mp_float_uint_t sgn:1, exp:MP_FLOAT_EXP_BITS, frc:MP_FLOAT_FRAC_BITS; } p;
#endif
mp_float_uint_t i;
} u = {.f = src};
mp_float_union_t u = {.f = src};
mp_int_t val;
const int adj_exp = (int)u.p.exp - MP_FLOAT_EXP_BIAS;
if (adj_exp < 0) {
@@ -89,7 +77,7 @@ typedef uint32_t mp_float_uint_t;
// number may have a fraction; xor the integer part with the fractional part
val = (frc >> (MP_FLOAT_FRAC_BITS - adj_exp))
^ (frc & (((mp_float_uint_t)1 << (MP_FLOAT_FRAC_BITS - adj_exp)) - 1));
} else if ((unsigned int)adj_exp < BITS_PER_BYTE * sizeof(mp_int_t) - 1) {
} else if ((unsigned int)adj_exp < MP_BITS_PER_BYTE * sizeof(mp_int_t) - 1) {
// the number is a (big) whole integer and will fit in val's signed-width
val = (mp_int_t)frc << (adj_exp - MP_FLOAT_FRAC_BITS);
} else {
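
The hash code above now reads the float through the shared mp_float_union_t instead of a local ad-hoc union, giving named access to the sign, exponent and fraction fields. Below is a standalone sketch of that bit-field view for IEEE-754 single precision on a little-endian target; the field widths and ordering are assumptions of this sketch, whereas MicroPython selects them from MICROPY_FLOAT_IMPL and its endianness macros.

#include <stdint.h>
#include <stdio.h>

// Assumes IEEE-754 binary32 and a little-endian bit-field layout.
typedef union {
    float f;
    struct { uint32_t frc : 23, exp : 8, sgn : 1; } p;
    uint32_t i;
} float_union_t;

int main(void) {
    float_union_t u = { .f = -6.5f };
    // -6.5 = -1.625 * 2^2, so the unbiased exponent is 2.
    printf("sign=%u  exponent=%d  fraction=0x%06x\n",
           (unsigned)u.p.sgn, (int)u.p.exp - 127, (unsigned)u.p.frc);
    return 0;
}
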
@@ -109,17 +97,17 @@ typedef uint32_t mp_float_uint_t;
STATIC void float_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
(void)kind;
mp_float_t o_val = mp_obj_float_get(o_in);
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
char buf[16];
#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
const int precision = 6;
#else
const int precision = 7;
#endif
#else
#else
char buf[32];
const int precision = 16;
#endif
#endif
mp_format_float(o_val, buf, sizeof(buf), 'g', precision, '\0');
mp_print_str(print, buf);
if (strchr(buf, '.') == NULL && strchr(buf, 'e') == NULL && strchr(buf, 'n') == NULL) {
@@ -156,10 +144,14 @@ STATIC mp_obj_t float_make_new(const mp_obj_type_t *type_in, size_t n_args, size
STATIC mp_obj_t float_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
mp_float_t val = mp_obj_float_get(o_in);
switch (op) {
case MP_UNARY_OP_BOOL: return mp_obj_new_bool(val != 0);
case MP_UNARY_OP_HASH: return MP_OBJ_NEW_SMALL_INT(mp_float_hash(val));
case MP_UNARY_OP_POSITIVE: return o_in;
case MP_UNARY_OP_NEGATIVE: return mp_obj_new_float(-val);
case MP_UNARY_OP_BOOL:
return mp_obj_new_bool(val != 0);
case MP_UNARY_OP_HASH:
return MP_OBJ_NEW_SMALL_INT(mp_float_hash(val));
case MP_UNARY_OP_POSITIVE:
return o_in;
case MP_UNARY_OP_NEGATIVE:
return mp_obj_new_float(-val);
case MP_UNARY_OP_ABS: {
if (signbit(val)) {
return mp_obj_new_float(-val);
@@ -167,24 +159,24 @@ STATIC mp_obj_t float_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
return o_in;
}
}
default: return MP_OBJ_NULL; // op not supported
default:
return MP_OBJ_NULL; // op not supported
}
}
STATIC mp_obj_t float_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
mp_float_t lhs_val = mp_obj_float_get(lhs_in);
#if MICROPY_PY_BUILTINS_COMPLEX
#if MICROPY_PY_BUILTINS_COMPLEX
if (mp_obj_is_type(rhs_in, &mp_type_complex)) {
return mp_obj_complex_binary_op(op, lhs_val, 0, rhs_in);
} else
#endif
{
return mp_obj_float_binary_op(op, lhs_val, rhs_in);
}
#endif
return mp_obj_float_binary_op(op, lhs_val, rhs_in);
}
const mp_obj_type_t mp_type_float = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_EQ_NOT_REFLEXIVE | MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE,
.name = MP_QSTR_float,
.print = float_print,
.make_new = float_make_new,
@@ -218,24 +210,24 @@ STATIC void mp_obj_float_divmod(mp_float_t *x, mp_float_t *y) {
mp_float_t div = (*x - mod) / *y;
// Python specs require that mod has same sign as second operand
if (mod == 0.0) {
mod = MICROPY_FLOAT_C_FUN(copysign)(0.0, *y);
if (mod == MICROPY_FLOAT_ZERO) {
mod = MICROPY_FLOAT_C_FUN(copysign)(MICROPY_FLOAT_ZERO, *y);
} else {
if ((mod < 0.0) != (*y < 0.0)) {
if ((mod < MICROPY_FLOAT_ZERO) != (*y < MICROPY_FLOAT_ZERO)) {
mod += *y;
div -= 1.0;
div -= MICROPY_FLOAT_CONST(1.0);
}
}
mp_float_t floordiv;
if (div == 0.0) {
if (div == MICROPY_FLOAT_ZERO) {
// if division is zero, take the correct sign of zero
floordiv = MICROPY_FLOAT_C_FUN(copysign)(0.0, *x / *y);
floordiv = MICROPY_FLOAT_C_FUN(copysign)(MICROPY_FLOAT_ZERO, *x / *y);
} else {
// Python specs require that x == (x//y)*y + (x%y)
floordiv = MICROPY_FLOAT_C_FUN(floor)(div);
if (div - floordiv > 0.5) {
floordiv += 1.0;
if (div - floordiv > MICROPY_FLOAT_CONST(0.5)) {
floordiv += MICROPY_FLOAT_CONST(1.0);
}
}
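
The divmod rewrite above keeps the long-standing rule that the remainder carries the sign of the second operand and that x == (x//y)*y + (x%y); only the literals changed to MICROPY_FLOAT_ZERO/MICROPY_FLOAT_CONST so single-precision builds avoid promotion to double. A standalone sketch of the sign fix-up itself, in plain double precision:

#include <math.h>
#include <stdio.h>

// C's fmod() gives the remainder the sign of x; Python requires the sign of y.
static double python_style_mod(double x, double y) {
    double r = fmod(x, y);
    if (r == 0.0) {
        return copysign(0.0, y);       // signed zero follows the divisor
    }
    if ((r < 0.0) != (y < 0.0)) {
        r += y;                        // shift into the divisor's sign range
    }
    return r;
}

int main(void) {
    printf("C    fmod(-7, 3) = %g\n", fmod(-7.0, 3.0));              // -1
    printf("Python  -7 %% 3   = %g\n", python_style_mod(-7.0, 3.0)); //  2
    return 0;
}
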
@@ -252,16 +244,22 @@ mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t
switch (op) {
case MP_BINARY_OP_ADD:
case MP_BINARY_OP_INPLACE_ADD: lhs_val += rhs_val; break;
case MP_BINARY_OP_INPLACE_ADD:
lhs_val += rhs_val;
break;
case MP_BINARY_OP_SUBTRACT:
case MP_BINARY_OP_INPLACE_SUBTRACT: lhs_val -= rhs_val; break;
case MP_BINARY_OP_INPLACE_SUBTRACT:
lhs_val -= rhs_val;
break;
case MP_BINARY_OP_MULTIPLY:
case MP_BINARY_OP_INPLACE_MULTIPLY: lhs_val *= rhs_val; break;
case MP_BINARY_OP_INPLACE_MULTIPLY:
lhs_val *= rhs_val;
break;
case MP_BINARY_OP_FLOOR_DIVIDE:
case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
if (rhs_val == 0) {
zero_division_error:
mp_raise_msg(&mp_type_ZeroDivisionError, "divide by zero");
zero_division_error:
mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
}
// Python specs require that x == (x//y)*y + (x%y) so we must
// call divmod to compute the correct floor division, which
@@ -277,15 +275,15 @@ mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t
break;
case MP_BINARY_OP_MODULO:
case MP_BINARY_OP_INPLACE_MODULO:
if (rhs_val == 0) {
if (rhs_val == MICROPY_FLOAT_ZERO) {
goto zero_division_error;
}
lhs_val = MICROPY_FLOAT_C_FUN(fmod)(lhs_val, rhs_val);
// Python specs require that mod has same sign as second operand
if (lhs_val == 0.0) {
if (lhs_val == MICROPY_FLOAT_ZERO) {
lhs_val = MICROPY_FLOAT_C_FUN(copysign)(0.0, rhs_val);
} else {
if ((lhs_val < 0.0) != (rhs_val < 0.0)) {
if ((lhs_val < MICROPY_FLOAT_ZERO) != (rhs_val < MICROPY_FLOAT_ZERO)) {
lhs_val += rhs_val;
}
}
@@ -295,13 +293,19 @@ mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t
if (lhs_val == 0 && rhs_val < 0 && !isinf(rhs_val)) {
goto zero_division_error;
}
if (lhs_val < 0 && rhs_val != MICROPY_FLOAT_C_FUN(floor)(rhs_val)) {
if (lhs_val < 0 && rhs_val != MICROPY_FLOAT_C_FUN(floor)(rhs_val) && !isnan(rhs_val)) {
#if MICROPY_PY_BUILTINS_COMPLEX
return mp_obj_complex_binary_op(MP_BINARY_OP_POWER, lhs_val, 0, rhs_in);
#else
mp_raise_ValueError("complex values not supported");
mp_raise_ValueError(MP_ERROR_TEXT("complex values not supported"));
#endif
}
#if MICROPY_PY_MATH_POW_FIX_NAN // Also see modmath.c.
if (lhs_val == MICROPY_FLOAT_CONST(1.0) || rhs_val == MICROPY_FLOAT_CONST(0.0)) {
lhs_val = MICROPY_FLOAT_CONST(1.0);
break;
}
#endif
lhs_val = MICROPY_FLOAT_C_FUN(pow)(lhs_val, rhs_val);
break;
case MP_BINARY_OP_DIVMOD: {
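
The new MICROPY_PY_MATH_POW_FIX_NAN block above forces the results Python expects for 1.0 ** y and x ** 0.0 (both 1.0, even when the other operand is NaN), because some embedded libm implementations return NaN for those cases. A minimal sketch of the same guard around pow():

#include <math.h>
#include <stdio.h>

static double checked_pow(double x, double y) {
    // Force the CPython-defined answers before asking libm, mirroring the
    // MICROPY_PY_MATH_POW_FIX_NAN workaround above.
    if (x == 1.0 || y == 0.0) {
        return 1.0;
    }
    return pow(x, y);
}

int main(void) {
    printf("1.0 ** nan = %g\n", checked_pow(1.0, NAN));   // 1
    printf("nan ** 0.0 = %g\n", checked_pow(NAN, 0.0));   // 1
    printf("2.0 ** 10  = %g\n", checked_pow(2.0, 10.0));  // 1024
    return 0;
}
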
@@ -315,11 +319,16 @@ mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t
};
return mp_obj_new_tuple(2, tuple);
}
case MP_BINARY_OP_LESS: return mp_obj_new_bool(lhs_val < rhs_val);
case MP_BINARY_OP_MORE: return mp_obj_new_bool(lhs_val > rhs_val);
case MP_BINARY_OP_EQUAL: return mp_obj_new_bool(lhs_val == rhs_val);
case MP_BINARY_OP_LESS_EQUAL: return mp_obj_new_bool(lhs_val <= rhs_val);
case MP_BINARY_OP_MORE_EQUAL: return mp_obj_new_bool(lhs_val >= rhs_val);
case MP_BINARY_OP_LESS:
return mp_obj_new_bool(lhs_val < rhs_val);
case MP_BINARY_OP_MORE:
return mp_obj_new_bool(lhs_val > rhs_val);
case MP_BINARY_OP_EQUAL:
return mp_obj_new_bool(lhs_val == rhs_val);
case MP_BINARY_OP_LESS_EQUAL:
return mp_obj_new_bool(lhs_val <= rhs_val);
case MP_BINARY_OP_MORE_EQUAL:
return mp_obj_new_bool(lhs_val >= rhs_val);
default:
return MP_OBJ_NULL; // op not supported


@@ -58,6 +58,7 @@ STATIC mp_obj_t fun_builtin_0_call(mp_obj_t self_in, size_t n_args, size_t n_kw,
const mp_obj_type_t mp_type_fun_builtin_0 = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = fun_builtin_0_call,
.unary_op = mp_generic_unary_op,
@@ -72,6 +73,7 @@ STATIC mp_obj_t fun_builtin_1_call(mp_obj_t self_in, size_t n_args, size_t n_kw,
const mp_obj_type_t mp_type_fun_builtin_1 = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = fun_builtin_1_call,
.unary_op = mp_generic_unary_op,
@@ -86,6 +88,7 @@ STATIC mp_obj_t fun_builtin_2_call(mp_obj_t self_in, size_t n_args, size_t n_kw,
const mp_obj_type_t mp_type_fun_builtin_2 = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = fun_builtin_2_call,
.unary_op = mp_generic_unary_op,
@@ -100,6 +103,7 @@ STATIC mp_obj_t fun_builtin_3_call(mp_obj_t self_in, size_t n_args, size_t n_kw,
const mp_obj_type_t mp_type_fun_builtin_3 = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = fun_builtin_3_call,
.unary_op = mp_generic_unary_op,
@@ -130,6 +134,7 @@ STATIC mp_obj_t fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_k
const mp_obj_type_t mp_type_fun_builtin_var = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN,
.name = MP_QSTR_function,
.call = fun_builtin_var_call,
.unary_op = mp_generic_unary_op,
@@ -188,17 +193,18 @@ STATIC void dump_args(const mp_obj_t *a, size_t sz) {
// With this macro you can tune the maximum number of function state bytes
// that will be allocated on the stack. Any function that needs more
// than this will try to use the heap, with fallback to stack allocation.
#define VM_MAX_STATE_ON_STACK (11 * sizeof(mp_uint_t))
#define VM_MAX_STATE_ON_STACK (sizeof(mp_uint_t) * 11)
#define DECODE_CODESTATE_SIZE(bytecode, n_state_out_var, state_size_out_var) \
{ \
const uint8_t *ip = bytecode; \
size_t n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args; \
MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state_out_var, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args); \
\
(void)scope_flags; (void)n_pos_args; (void)n_kwonly_args; (void)n_def_args; \
\
/* state size in bytes */ \
state_size_out_var = n_state_out_var * sizeof(mp_obj_t) \
+ n_exc_stack * sizeof(mp_exc_stack_t); \
+ n_exc_stack * sizeof(mp_exc_stack_t); \
}
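
VM_MAX_STATE_ON_STACK and DECODE_CODESTATE_SIZE above support the policy described in the comment: small call frames are kept on the C stack, larger ones go to the heap, with a fallback to stack allocation if the heap is exhausted. The following is a simplified standalone sketch of that policy; the real code sizes the state from the bytecode prelude and uses alloca for the fallback, which this sketch only approximates.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STATE_ON_STACK (sizeof(unsigned long) * 11)

static void call_with_state(size_t state_size) {
    unsigned char stack_state[MAX_STATE_ON_STACK];
    unsigned char *state = stack_state;
    int on_heap = 0;

    if (state_size > sizeof(stack_state)) {
        state = malloc(state_size);
        if (state != NULL) {
            on_heap = 1;
        } else {
            // Heap exhausted: fall back to the stack buffer (the real VM
            // uses alloca here; this sketch just caps the size instead).
            state = stack_state;
            state_size = sizeof(stack_state);
        }
    }

    memset(state, 0, state_size);   // the VM would set up args/locals here
    printf("frame state: %zu bytes on the %s\n", state_size, on_heap ? "heap" : "stack");

    if (on_heap) {
        free(state);
    }
}

int main(void) {
    call_with_state(40);    // fits on the stack
    call_with_state(400);   // goes to the heap
    return 0;
}
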
#define INIT_CODESTATE(code_state, _fun_bc, _n_state, n_args, n_kw, args) \
@@ -350,20 +356,25 @@ void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
if (attr == MP_QSTR___name__) {
dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(self_in));
}
if (attr == MP_QSTR___globals__) {
mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
dest[0] = MP_OBJ_FROM_PTR(self->globals);
}
}
#endif
const mp_obj_type_t mp_type_fun_bc = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_function,
#if MICROPY_CPYTHON_COMPAT
#if MICROPY_CPYTHON_COMPAT
.print = fun_bc_print,
#endif
#endif
.call = fun_bc_call,
.unary_op = mp_generic_unary_op,
#if MICROPY_PY_FUNCTION_ATTRS
#if MICROPY_PY_FUNCTION_ATTRS
.attr = mp_obj_fun_bc_attr,
#endif
#endif
};
mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table) {
@@ -400,19 +411,20 @@ mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byt
STATIC mp_obj_t fun_native_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
MP_STACK_CHECK();
mp_obj_fun_bc_t *self = self_in;
mp_call_fun_t fun = MICROPY_MAKE_POINTER_CALLABLE((void*)self->bytecode);
mp_call_fun_t fun = MICROPY_MAKE_POINTER_CALLABLE((void *)self->bytecode);
return fun(self_in, n_args, n_kw, args);
}
STATIC const mp_obj_type_t mp_type_fun_native = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_function,
.call = fun_native_call,
.unary_op = mp_generic_unary_op,
};
mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table) {
mp_obj_fun_bc_t *o = mp_obj_new_fun_bc(def_args_in, def_kw_args, (const byte*)fun_data, const_table);
mp_obj_fun_bc_t *o = mp_obj_new_fun_bc(def_args_in, def_kw_args, (const byte *)fun_data, const_table);
o->base.type = &mp_type_fun_native;
return o;
}
@@ -455,13 +467,13 @@ STATIC mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) {
size_t l;
return (mp_uint_t)mp_obj_str_get_data(obj, &l);
} else {
mp_obj_type_t *type = mp_obj_get_type(obj);
#if MICROPY_PY_BUILTINS_FLOAT
const mp_obj_type_t *type = mp_obj_get_type(obj);
#if MICROPY_PY_BUILTINS_FLOAT
if (type == &mp_type_float) {
// convert float to int (could also pass in float registers)
return (mp_int_t)mp_obj_float_get(obj);
} else
#endif
}
#endif
if (type == &mp_type_tuple || type == &mp_type_list) {
// pointer to start of tuple (could pass length, but then could use len(x) for that)
size_t len;
@@ -505,7 +517,7 @@ STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const
convert_obj_for_inline_asm(args[1]),
convert_obj_for_inline_asm(args[2]),
convert_obj_for_inline_asm(args[3])
);
);
}
return mp_native_to_obj(ret, self->type_sig);
@@ -513,6 +525,7 @@ STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const
STATIC const mp_obj_type_t mp_type_fun_asm = {
{ &mp_type_type },
.flags = MP_TYPE_FLAG_BINDS_SELF,
.name = MP_QSTR_function,
.call = fun_asm_call,
.unary_op = mp_generic_unary_op,
