Mirror of https://github.com/UpsilonNumworks/Upsilon.git, synced 2026-01-19 00:37:25 +01:00
Fix spelling (#128)
* Fix spelling in .cpp files
* Fix spelling in all files
@@ -705,7 +705,7 @@ STATIC void compile_funcdef_lambdef_param(compiler_t *comp, mp_parse_node_t pn)
     } else {
         // this parameter has a default value
-        // in CPython, None (and True, False?) as default parameters are loaded with LOAD_NAME; don't understandy why
+        // in CPython, None (and True, False?) as default parameters are loaded with LOAD_NAME; don't understand why

         if (comp->have_star) {
             comp->num_dict_params += 1;
@@ -574,7 +574,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
     if (emit->pass == MP_PASS_CODE_SIZE) {
         // Commit to the encoding size based on the value of prelude_offset in this pass.
         // By using 32768 as the cut-off it is highly unlikely that prelude_offset will
-        // grow beyond 65535 by the end of thiss pass, and so require the larger encoding.
+        // grow beyond 65535 by the end of this pass, and so require the larger encoding.
         emit->prelude_offset_uses_u16_encoding = emit->prelude_offset < 32768;
     }
     if (emit->prelude_offset_uses_u16_encoding) {
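The comment in this hunk describes a two-pass trick: during the code-size pass the emitter commits to a 16-bit or a 32-bit encoding for the prelude offset, using 32768 as a cut-off so that later growth is very unlikely to push the offset past 65535. A minimal C sketch of that commit-then-reuse pattern, with hypothetical names rather than the emitter's real API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    // Hypothetical sizing state: commit to a field width during the sizing pass,
    // then reuse the decision in the final pass so offsets stay stable.
    typedef struct {
        uint32_t prelude_offset;
        bool offset_uses_u16;
    } sizing_t;

    static void commit_encoding(sizing_t *s) {
        // 32768 is the cut-off: even if the offset grows a little in the final
        // pass, it is very unlikely to cross 65535 and invalidate the choice.
        s->offset_uses_u16 = s->prelude_offset < 32768;
    }

    static unsigned encoded_field_size(const sizing_t *s) {
        return s->offset_uses_u16 ? 2 : 4;  // 16-bit vs 32-bit offset field
    }

    int main(void) {
        sizing_t s = { .prelude_offset = 40000, .offset_uses_u16 = false };
        commit_encoding(&s);
        printf("offset %u -> %u-byte field\n", (unsigned)s.prelude_offset, encoded_field_size(&s));
        return 0;
    }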
@@ -874,7 +874,7 @@ STATIC vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_i
     }
 }

-// Copies all unsettled registers and immediates that are Python values into the
+// Copies all unsettled registers and immediate that are Python values into the
 // concrete Python stack. This ensures the concrete Python stack holds valid
 // values for the current stack_size.
 // This function may clobber REG_TEMP1.
@@ -1070,7 +1070,7 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de
         }
     }

-    // Adujust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
+    // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
     adjust_stack(emit, -n_pop);
     emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size);
 }
@@ -318,7 +318,7 @@ int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, ch

     // We now have num.f as a floating point number between >= 1 and < 10
     // (or equal to zero), and e contains the absolute value of the power of
-    // 10 exponent. and (dec + 1) == the number of dgits before the decimal.
+    // 10 exponent. and (dec + 1) == the number of digits before the decimal.

     // For e, prec is # digits after the decimal
     // For f, prec is # digits after the decimal
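For background on the two context lines about prec: with both the 'e' and 'f' conversions the precision counts digits after the decimal point, which matches standard printf behaviour. A quick worked example in plain C:

    #include <stdio.h>

    int main(void) {
        double x = 1234.5678;
        // 'e': precision = digits after the decimal of the mantissa -> 1.23e+03
        printf("%.2e\n", x);
        // 'f': precision = digits after the decimal of the full value -> 1234.57
        printf("%.2f\n", x);
        return 0;
    }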
@@ -512,7 +512,7 @@ found:
     // Set last free ATB index to block after last block we found, for start of
     // next scan. To reduce fragmentation, we only do this if we were looking
     // for a single free block, which guarantees that there are no free blocks
-    // before this one. Also, whenever we free or shink a block we must check
+    // before this one. Also, whenever we free or shrink a block we must check
     // if this index needs adjusting (see gc_realloc and gc_free).
     if (n_free == 1) {
         MP_STATE_MEM(gc_last_free_atb_index) = (i + 1) / BLOCKS_PER_ATB;
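The comment touched by this hunk explains a scan-start hint: after a single-block allocation the collector records where the next search should begin, and gc_free/gc_realloc must pull that hint back whenever an earlier block becomes free. A toy sketch of the same idea over a plain used/free bitmap (illustrative only; the real allocator uses a packed ATB encoding and scans for runs of blocks):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NUM_BLOCKS 64

    static bool used[NUM_BLOCKS];
    static size_t first_free_hint;  // start the next scan here

    // Allocate one block, scanning from the hint; returns NUM_BLOCKS on failure.
    static size_t alloc_block(void) {
        for (size_t i = first_free_hint; i < NUM_BLOCKS; i++) {
            if (!used[i]) {
                used[i] = true;
                // Safe to advance: every block before i is now in use.
                first_free_hint = i + 1;
                return i;
            }
        }
        return NUM_BLOCKS;
    }

    static void free_block(size_t i) {
        used[i] = false;
        // The "index needs adjusting" case: never let the hint skip a block
        // that has just been freed.
        if (i < first_free_hint) {
            first_free_hint = i;
        }
    }

    int main(void) {
        size_t a = alloc_block();
        size_t b = alloc_block();
        free_block(a);
        printf("freed %zu, next allocation reuses %zu (b = %zu)\n", a, alloc_block(), b);
        return 0;
    }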
@@ -594,7 +594,7 @@ void mp_lexer_to_next(mp_lexer_t *lex) {
         // a string or bytes literal

         // Python requires adjacent string/bytes literals to be automatically
-        // concatenated. We do it here in the tokeniser to make efficient use of RAM,
+        // concatenated. We do it here in the tokenizer to make efficient use of RAM,
         // because then the lexer's vstr can be used to accumulate the string literal,
         // in contrast to creating a parse tree of strings and then joining them later
         // in the compiler. It's also more compact in code size to do it here.
@@ -32,7 +32,7 @@
 #include "py/qstr.h"
 #include "py/reader.h"

-/* lexer.h -- simple tokeniser for MicroPython
+/* lexer.h -- simple tokenizer for MicroPython
  *
  * Uses (byte) length instead of null termination.
  * Tokens are the same - UTF-8 with (byte) length.
@@ -24,7 +24,7 @@ def check_non_ascii(msg):


 # Replace <char><space> with <char | 0x80>.
-# Trival scheme to demo/test.
+# Trivial scheme to demo/test.
 def space_compression(error_strings):
     for line in error_strings:
         check_non_ascii(line)
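The comment pair in this hunk spells out the demo compression scheme itself: every "<char><space>" pair in a 7-bit ASCII string is folded into one byte with the high bit set. A hedged C sketch of that encoding step (the real script does this on the host side in Python; the names here are made up):

    #include <stdio.h>
    #include <string.h>

    // Fold "<char><space>" into "<char | 0x80>" for 7-bit ASCII input.
    // out must have room for strlen(in) bytes; returns the compressed length.
    static size_t space_compress(const char *in, unsigned char *out) {
        size_t n = 0;
        for (size_t i = 0; in[i] != '\0'; i++) {
            unsigned char c = (unsigned char)in[i];
            if (c < 0x80 && in[i + 1] == ' ') {
                out[n++] = c | 0x80;  // absorb the following space
                i++;                  // and skip it
            } else {
                out[n++] = c;
            }
        }
        return n;
    }

    int main(void) {
        const char *msg = "index out of range";
        unsigned char buf[64];
        size_t n = space_compress(msg, buf);
        printf("%zu bytes -> %zu bytes\n", strlen(msg), n);
        return 0;
    }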
@@ -16,7 +16,7 @@ endif

 # QSTR generation uses the same CFLAGS, with these modifications.
 QSTR_GEN_FLAGS = -DNO_QSTR
-# Note: := to force evalulation immediately.
+# Note: := to force evaluation immediately.
 QSTR_GEN_CFLAGS := $(CFLAGS)
 QSTR_GEN_CFLAGS += $(QSTR_GEN_FLAGS)
 QSTR_GEN_CXXFLAGS := $(CXXFLAGS)
@@ -28,7 +28,7 @@ QSTR_GEN_CXXFLAGS += $(QSTR_GEN_FLAGS)
 # tree.
 #
 # So for example, py/map.c would have an object file name py/map.o
-# The object files will go into the build directory and mantain the same
+# The object files will go into the build directory and maintain the same
 # directory structure as the source tree. So the final dependency will look
 # like this:
 #
@@ -184,7 +184,7 @@ endif
 ifneq ($(PROG),)
 # Build a standalone executable (unix does this)

-# The executable should have an .exe extension for builds targetting 'pure'
+# The executable should have an .exe extension for builds targeting 'pure'
 # Windows, i.e. msvc or mingw builds, but not when using msys or cygwin's gcc.
 COMPILER_TARGET := $(shell $(CC) -dumpmachine)
 ifneq (,$(findstring mingw,$(COMPILER_TARGET)))
@@ -79,7 +79,7 @@ STATIC mp_obj_t mp_builtin___build_class__(size_t n_args, const mp_obj_t *args)
     meta_args[2] = class_locals; // dict of members
     mp_obj_t new_class = mp_call_function_n_kw(meta, 3, 0, meta_args);

-    // store into cell if neede
+    // store into cell if needed
     if (cell != mp_const_none) {
         mp_obj_cell_set(cell, new_class);
     }
@@ -1221,7 +1221,7 @@ typedef double mp_float_t;
 // the semantics of CPython's pkg_resources.resource_stream()
 // (allows to access binary resources in frozen source packages).
 // Note that the same functionality can be achieved in "pure
-// Python" by prepocessing binary resources into Python source
+// Python" by preprocessing binary resources into Python source
 // and bytecode-freezing it (with a simple helper module available
 // e.g. in micropython-lib).
 #ifndef MICROPY_PY_IO_RESOURCE_STREAM
@@ -279,7 +279,7 @@ mp_obj_t mp_obj_equal_not_equal(mp_binary_op_t op, mp_obj_t o1, mp_obj_t o2) {
         o2 = temp;
     }

-    // equality not implemented, so fall back to pointer conparison
+    // equality not implemented, so fall back to pointer comparison
     return (o1 == o2) ? local_true : local_false;
 }
@@ -98,7 +98,7 @@ mp_obj_t mp_parse_num_integer(const char *restrict str_, size_t len, int base, m
             break;
         }

-        // add next digi and check for overflow
+        // add next digit and check for overflow
        if (mp_small_int_mul_overflow(int_val, base)) {
            goto overflow;
        }
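The comment fixed in this last hunk refers to the standard accumulate-and-check pattern: before folding each digit into the running value, test whether value * base (and the following add) would overflow. A small standalone sketch using the GCC/Clang overflow builtins, which is an assumption about the toolchain; the real parser uses its own mp_small_int_* helpers:

    #include <stdbool.h>
    #include <stdio.h>

    // Parse a non-negative decimal integer, reporting overflow instead of wrapping.
    static bool parse_uint(const char *s, unsigned long *out) {
        unsigned long val = 0;
        for (; *s >= '0' && *s <= '9'; s++) {
            unsigned long digit = (unsigned long)(*s - '0');
            // add next digit and check for overflow
            if (__builtin_mul_overflow(val, 10UL, &val) ||
                __builtin_add_overflow(val, digit, &val)) {
                return false;
            }
        }
        *out = val;
        return true;
    }

    int main(void) {
        unsigned long v = 0;
        bool ok = parse_uint("12345", &v);
        printf("12345 -> ok=%d value=%lu\n", ok, v);
        ok = parse_uint("999999999999999999999999", &v);
        printf("huge  -> ok=%d (overflow detected)\n", ok);
        return 0;
    }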